diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..99262ca894
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,9 @@
+.dockerignore
+.git/
+.github/
+.gitignore
+.idea/
+README.md
+Dockerfile
+f
+target/
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 95e3fb3444..91cf374c79 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -3,14 +3,15 @@
 # Please see the documentation for all configuration options:
 # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates

-version: 2
-updates:
-- package-ecosystem: cargo
-  directory: "/"
-  schedule:
-    interval: daily
-    time: "01:00"
-    timezone: America/Los_Angeles
-  #labels:
-  #  - "automerge"
-  open-pull-requests-limit: 6
+# NOTE: Jito-Solana ignores this as we pull in upstream dependabot merges
+#version: 2
+#updates:
+#- package-ecosystem: cargo
+#  directory: "/"
+#  schedule:
+#    interval: daily
+#    time: "01:00"
+#    timezone: America/Los_Angeles
+#  #labels:
+#  #  - "automerge"
+#  open-pull-requests-limit: 6
diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml
index d0bad722e0..d9aebe1830 100644
--- a/.github/workflows/cargo.yml
+++ b/.github/workflows/cargo.yml
@@ -34,6 +34,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'

       - uses: mozilla-actions/sccache-action@v0.0.3
         with:
@@ -56,6 +58,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'

       - uses: mozilla-actions/sccache-action@v0.0.3
         with:
diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml
index 97118918ef..aacb52629d 100644
--- a/.github/workflows/client-targets.yml
+++ b/.github/workflows/client-targets.yml
@@ -32,6 +32,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'

       - run: cargo install cargo-ndk@2.12.2

@@ -56,6 +58,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'

       - name: Setup Rust
         run: |
diff --git a/.github/workflows/crate-check.yml b/.github/workflows/crate-check.yml
index a47e7cde5f..9b57d633ad 100644
--- a/.github/workflows/crate-check.yml
+++ b/.github/workflows/crate-check.yml
@@ -18,6 +18,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0
+          submodules: 'recursive'

       - name: Get commit range (push)
         if: ${{ github.event_name == 'push' }}
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index fb2096bd33..e5ac907ea1 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -22,6 +22,7 @@ jobs:
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
+          submodules: 'recursive'

       - name: Get commit range (push)
         if: ${{ github.event_name == 'push' }}
@@ -77,6 +78,8 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'

       - name: Setup Node
         uses: actions/setup-node@v3
diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml
index 6afd398f43..2768e253f1 100644
--- a/.github/workflows/downstream-project-spl.yml
+++ b/.github/workflows/downstream-project-spl.yml
@@ -37,6 +37,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'

       - shell: bash
         run: |
@@ -86,6 +88,8 @@ jobs:
         ]
     steps:
       - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'

       - shell: bash
         run: |
@@ -137,6 +141,8 @@ jobs:

     steps:
       - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'
       - shell: bash
         run: |
diff --git a/.github/workflows/increment-cargo-version-on-release.yml b/.github/workflows/increment-cargo-version-on-release.yml
index 5592d76ca5..ca55af2155 100644
--- a/.github/workflows/increment-cargo-version-on-release.yml
+++ b/.github/workflows/increment-cargo-version-on-release.yml
@@ -11,6 +11,8 @@ jobs:
     steps:
       - name: Checkout Repository
         uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'

       # This script confirms two assumptions:
       # 1) Tag should be branch.
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 98dc697920..fc98a5895b 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -26,6 +26,7 @@ jobs:
         with:
           ref: master
           fetch-depth: 0
+          submodules: 'recursive'

       - name: Setup Rust
         shell: bash
diff --git a/.gitignore b/.gitignore
index 3167a9d720..f891833b15 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,7 @@
 /solana-release.tar.bz2
 /solana-metrics/
 /solana-metrics.tar.bz2
-/target/
+**/target/
 /test-ledger/

 **/*.rs.bk
@@ -27,7 +27,11 @@ log-*/
 # fetch-spl.sh artifacts
 /spl-genesis-args.sh
 /spl_*.so
+/jito_*.so

 .DS_Store

 # scripts that may be generated by cargo *-bpf commands
 **/cargo-*-bpf-child-script-*.sh
+
+.env
+docker-output/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000..e31fc7fccd
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,9 @@
+[submodule "anchor"]
+	path = anchor
+	url = https://github.com/jito-foundation/anchor.git
+[submodule "jito-programs"]
+	path = jito-programs
+	url = https://github.com/jito-foundation/jito-programs.git
+[submodule "jito-protos/protos"]
+	path = jito-protos/protos
+	url = https://github.com/jito-labs/mev-protos.git
diff --git a/Cargo.lock b/Cargo.lock
index bd9f369ec7..bcfc8880b1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -126,6 +126,145 @@ dependencies = [
  "alloc-no-stdlib",
 ]

+[[package]]
+name = "anchor-attribute-access-control"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "regex",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-account"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "bs58 0.4.0",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "rustversion",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-constant"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "proc-macro2 1.0.69",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-error"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-event"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-interface"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "heck 0.3.3",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-program"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-state"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-derive-accounts"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
"anchor-lang" +version = "0.24.2" +dependencies = [ + "anchor-attribute-access-control", + "anchor-attribute-account", + "anchor-attribute-constant", + "anchor-attribute-error", + "anchor-attribute-event", + "anchor-attribute-interface", + "anchor-attribute-program", + "anchor-attribute-state", + "anchor-derive-accounts", + "arrayref", + "base64 0.13.1", + "bincode", + "borsh 0.10.3", + "bytemuck", + "solana-program", + "thiserror", +] + +[[package]] +name = "anchor-syn" +version = "0.24.2" +dependencies = [ + "anyhow", + "bs58 0.3.1", + "heck 0.3.3", + "proc-macro2 1.0.69", + "proc-macro2-diagnostics", + "quote 1.0.33", + "serde", + "serde_json", + "sha2 0.9.9", + "syn 1.0.109", + "thiserror", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -156,12 +295,55 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "anstream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + [[package]] name = "anstyle" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +[[package]] +name = "anstyle-parse" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + [[package]] name = "anyhow" version = "1.0.75" @@ -228,7 +410,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote", + "quote 1.0.33", "syn 1.0.109", ] @@ -240,8 +422,8 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint 0.4.4", "num-traits", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -276,8 +458,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -337,8 +519,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", "synstructure", ] @@ -349,8 +531,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -424,8 +606,8 @@ version = "0.3.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -435,8 +617,8 @@ version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -586,8 +768,8 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease 0.2.4", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "regex", "rustc-hash", "shlex", @@ -722,7 +904,7 @@ dependencies = [ "borsh-derive-internal 0.9.3", "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", - "proc-macro2", + "proc-macro2 1.0.69", "syn 1.0.109", ] @@ -735,7 +917,7 @@ dependencies = [ "borsh-derive-internal 0.10.3", "borsh-schema-derive-internal 0.10.3", "proc-macro-crate 0.1.5", - "proc-macro2", + "proc-macro2 1.0.69", "syn 1.0.109", ] @@ -745,8 +927,8 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -756,8 +938,8 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -767,8 +949,8 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -778,8 +960,8 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -804,6 +986,12 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "bs58" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" + [[package]] name = "bs58" version = "0.4.0" @@ -884,8 +1072,8 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aca418a974d83d40a0c1f0c5cba6ff4bc28d8df099109ca459a2118d40b6322" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -1110,7 +1298,7 @@ checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ "atty", "bitflags 1.3.2", - "clap_derive", + "clap_derive 3.2.18", "clap_lex 0.2.4", "indexmap 1.9.3", "once_cell", @@ -1126,6 +1314,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" dependencies = [ "clap_builder", + "clap_derive 4.3.12", + "once_cell", ] [[package]] @@ -1134,8 +1324,10 @@ version = "4.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" dependencies = [ + "anstream", "anstyle", "clap_lex 0.5.0", + 
"strsim 0.10.0", ] [[package]] @@ -1144,13 +1336,25 @@ version = "3.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" dependencies = [ - "heck", + "heck 0.4.0", "proc-macro-error", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] +[[package]] +name = "clap_derive" +version = "4.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" +dependencies = [ + "heck 0.4.0", + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "clap_lex" version = "0.2.4" @@ -1166,6 +1370,12 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "combine" version = "3.8.1" @@ -1242,9 +1452,9 @@ version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", + "proc-macro2 1.0.69", + "quote 1.0.33", + "unicode-xid 0.2.2", ] [[package]] @@ -1494,8 +1704,8 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "strsim 0.10.0", "syn 2.0.38", ] @@ -1507,7 +1717,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", - "quote", + "quote 1.0.33", "syn 2.0.38", ] @@ -1541,6 +1751,17 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +[[package]] +name = "default-env" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f753eb82d29277e79efc625e84aecacfd4851ee50e05a8573a4740239a77bfd3" +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "syn 0.15.44", +] + [[package]] name = "der" version = "0.5.1" @@ -1576,8 +1797,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -1588,8 +1809,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" dependencies = [ "convert_case", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "rustc_version 0.3.3", "syn 1.0.109", ] @@ -1677,8 +1898,8 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -1700,8 +1921,8 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ - 
"proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -1759,8 +1980,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86b50932a01e7ec5c06160492ab660fb19b6bb2a7878030dd6cd68d21df9d4d" dependencies = [ "enum-ordinalize", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -1800,8 +2021,8 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -1813,8 +2034,8 @@ checksum = "0b166c9e378360dd5a6666a9604bb4f54ae0cac39023ffbac425e917a2a04fef" dependencies = [ "num-bigint 0.4.4", "num-traits", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -2075,8 +2296,8 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -2353,6 +2574,15 @@ dependencies = [ "http", ] +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "heck" version = "0.4.0" @@ -2692,6 +2922,49 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jito-programs-vote-state" +version = "0.1.5" +dependencies = [ + "anchor-lang", + "bincode", + "serde", + "serde_derive", + "solana-program", +] + +[[package]] +name = "jito-protos" +version = "1.17.22" +dependencies = [ + "bytes", + "prost", + "prost-types", + "protobuf-src", + "tonic", + "tonic-build", +] + +[[package]] +name = "jito-tip-distribution" +version = "0.1.5" +dependencies = [ + "anchor-lang", + "default-env", + "jito-programs-vote-state", + "solana-program", + "solana-security-txt", +] + +[[package]] +name = "jito-tip-payment" +version = "0.1.5" +dependencies = [ + "anchor-lang", + "default-env", + "solana-security-txt", +] + [[package]] name = "jobserver" version = "0.1.24" @@ -2772,8 +3045,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -3166,8 +3439,8 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -3299,8 +3572,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -3310,8 +3583,8 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 
1.0.33", "syn 2.0.38", ] @@ -3402,8 +3675,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.1.0", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -3414,8 +3687,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.1.0", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -3426,8 +3699,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ "proc-macro-crate 1.1.0", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -3509,8 +3782,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -3585,8 +3858,8 @@ checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" dependencies = [ "Inflector", "proc-macro-error", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -3739,8 +4012,8 @@ checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -3790,8 +4063,8 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -3924,7 +4197,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b83ec2d0af5c5c556257ff52c9f98934e243b9fd39604bfb2a9b75ec2e97f18" dependencies = [ - "proc-macro2", + "proc-macro2 1.0.69", "syn 1.0.109", ] @@ -3934,7 +4207,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ - "proc-macro2", + "proc-macro2 1.0.69", "syn 2.0.38", ] @@ -3964,8 +4237,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", "version_check", ] @@ -3976,11 +4249,20 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "version_check", ] +[[package]] +name = "proc-macro2" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +dependencies = [ + "unicode-xid 0.1.0", +] + [[package]] name = "proc-macro2" version = "1.0.69" @@ -3990,6 +4272,19 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 1.0.109", + "version_check", + "yansi", +] + [[package]] name = "proptest" version = "1.3.1" @@ -4027,7 +4322,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", - "heck", + "heck 0.4.0", "itertools", "lazy_static", "log", @@ -4050,8 +4345,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -4096,8 +4391,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -4155,13 +4450,22 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "quote" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +dependencies = [ + "proc-macro2 0.4.30", +] + [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ - "proc-macro2", + "proc-macro2 1.0.69", ] [[package]] @@ -4576,7 +4880,7 @@ checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.4", "sct", ] @@ -4610,6 +4914,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "rustls-webpki" +version = "0.100.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.101.4" @@ -4684,8 +4998,8 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -4773,8 +5087,8 @@ version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -4818,8 +5132,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ "darling", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -4868,8 +5182,8 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -5105,7 +5419,7 @@ dependencies = [ "assert_matches", "base64 0.21.4", "bincode", - "bs58", + "bs58 0.4.0", "bv", "lazy_static", "serde", @@ -5320,6 +5634,7 @@ dependencies = [ "solana-accounts-db", "solana-banks-interface", "solana-client", + "solana-gossip", "solana-runtime", 
"solana-sdk", "solana-send-transaction-service", @@ -5448,6 +5763,27 @@ dependencies = [ "tempfile", ] +[[package]] +name = "solana-bundle" +version = "1.17.22" +dependencies = [ + "anchor-lang", + "assert_matches", + "itertools", + "log", + "serde", + "solana-accounts-db", + "solana-ledger", + "solana-logger", + "solana-measure", + "solana-poh", + "solana-program-runtime", + "solana-runtime", + "solana-sdk", + "solana-transaction-status", + "thiserror", +] + [[package]] name = "solana-cargo-build-bpf" version = "1.17.22" @@ -5533,7 +5869,7 @@ version = "1.17.22" dependencies = [ "assert_matches", "bincode", - "bs58", + "bs58 0.4.0", "clap 2.33.3", "console", "const_format", @@ -5732,10 +6068,11 @@ dependencies = [ name = "solana-core" version = "1.17.22" dependencies = [ + "anchor-lang", "assert_matches", "base64 0.21.4", "bincode", - "bs58", + "bs58 0.4.0", "bytes", "chrono", "crossbeam-channel", @@ -5746,11 +6083,16 @@ dependencies = [ "futures 0.3.28", "histogram", "itertools", + "jito-protos", + "jito-tip-distribution", + "jito-tip-payment", "lazy_static", "log", "lru", "min-max-heap", "num_enum 0.6.1", + "prost", + "prost-types", "quinn", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5767,6 +6109,7 @@ dependencies = [ "serial_test", "solana-accounts-db", "solana-bloom", + "solana-bundle", "solana-client", "solana-core", "solana-cost-model", @@ -5783,11 +6126,13 @@ dependencies = [ "solana-perf", "solana-poh", "solana-program-runtime", + "solana-program-test", "solana-quic-client", "solana-rayon-threadlimit", "solana-rpc", "solana-rpc-client-api", "solana-runtime", + "solana-runtime-plugin", "solana-sdk", "solana-send-transaction-service", "solana-stake-program", @@ -5808,6 +6153,8 @@ dependencies = [ "test-case", "thiserror", "tokio", + "tonic", + "tonic-build", "trees", ] @@ -5941,7 +6288,7 @@ dependencies = [ "bitflags 2.3.3", "blake3", "block-buffer 0.10.4", - "bs58", + "bs58 0.4.0", "bv", "byteorder", "cc", @@ -5967,8 +6314,8 @@ dependencies = [ name = "solana-frozen-abi-macro" version = "1.17.22" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "rustc_version 0.4.0", "syn 2.0.38", ] @@ -6023,7 +6370,7 @@ dependencies = [ name = "solana-geyser-plugin-manager" version = "1.17.22" dependencies = [ - "bs58", + "bs58 0.4.0", "crossbeam-channel", "json5", "jsonrpc-core", @@ -6134,7 +6481,7 @@ dependencies = [ name = "solana-keygen" version = "1.17.22" dependencies = [ - "bs58", + "bs58 0.4.0", "clap 3.2.23", "dirs-next", "num_cpus", @@ -6154,7 +6501,7 @@ dependencies = [ "assert_matches", "bincode", "bitflags 2.3.3", - "bs58", + "bs58 0.4.0", "byteorder", "chrono", "chrono-humanize", @@ -6220,7 +6567,7 @@ name = "solana-ledger-tool" version = "1.17.22" dependencies = [ "assert_cmd", - "bs58", + "bs58 0.4.0", "bytecount", "chrono", "clap 2.33.3", @@ -6511,7 +6858,7 @@ dependencies = [ "blake3", "borsh 0.10.3", "borsh 0.9.3", - "bs58", + "bs58 0.4.0", "bv", "bytemuck", "cc", @@ -6694,7 +7041,7 @@ version = "1.17.22" dependencies = [ "base64 0.21.4", "bincode", - "bs58", + "bs58 0.4.0", "crossbeam-channel", "dashmap 4.0.2", "itertools", @@ -6714,6 +7061,7 @@ dependencies = [ "soketto", "solana-account-decoder", "solana-accounts-db", + "solana-bundle", "solana-client", "solana-entry", "solana-faucet", @@ -6724,6 +7072,7 @@ dependencies = [ "solana-net-utils", "solana-perf", "solana-poh", + "solana-program-runtime", "solana-rayon-threadlimit", "solana-rpc-client-api", "solana-runtime", @@ -6755,7 +7104,7 @@ dependencies = [ "async-trait", "base64 0.21.4", 
"bincode", - "bs58", + "bs58 0.4.0", "crossbeam-channel", "futures 0.3.28", "indicatif", @@ -6781,7 +7130,7 @@ name = "solana-rpc-client-api" version = "1.17.22" dependencies = [ "base64 0.21.4", - "bs58", + "bs58 0.4.0", "jsonrpc-core", "reqwest", "semver 1.0.20", @@ -6789,6 +7138,8 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", + "solana-accounts-db", + "solana-bundle", "solana-sdk", "solana-transaction-status", "solana-version", @@ -6818,13 +7169,14 @@ name = "solana-rpc-test" version = "1.17.22" dependencies = [ "bincode", - "bs58", + "bs58 0.4.0", "crossbeam-channel", "futures-util", "log", "reqwest", "serde", "serde_json", + "serial_test", "solana-account-decoder", "solana-client", "solana-logger", @@ -6923,17 +7275,36 @@ dependencies = [ "zstd", ] +[[package]] +name = "solana-runtime-plugin" +version = "1.17.22" +dependencies = [ + "crossbeam-channel", + "json5", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-ipc-server", + "jsonrpc-server-utils", + "libloading", + "log", + "solana-runtime", + "solana-sdk", + "thiserror", +] + [[package]] name = "solana-sdk" version = "1.17.22" dependencies = [ + "anchor-lang", "anyhow", "assert_matches", "base64 0.21.4", "bincode", "bitflags 2.3.3", "borsh 0.10.3", - "bs58", + "bs58 0.4.0", "bytemuck", "byteorder", "chrono", @@ -6985,9 +7356,9 @@ dependencies = [ name = "solana-sdk-macro" version = "1.17.22" dependencies = [ - "bs58", - "proc-macro2", - "quote", + "bs58 0.4.0", + "proc-macro2 1.0.69", + "quote 1.0.33", "rustversion", "syn 2.0.38", ] @@ -7005,11 +7376,13 @@ dependencies = [ "crossbeam-channel", "log", "solana-client", + "solana-gossip", "solana-logger", "solana-measure", "solana-metrics", "solana-runtime", "solana-sdk", + "solana-streamer", "solana-tpu-client", ] @@ -7083,7 +7456,7 @@ name = "solana-storage-proto" version = "1.17.22" dependencies = [ "bincode", - "bs58", + "bs58 0.4.0", "enum-iterator", "prost", "protobuf-src", @@ -7196,6 +7569,44 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-tip-distributor" +version = "1.17.22" +dependencies = [ + "anchor-lang", + "clap 4.3.21", + "crossbeam-channel", + "env_logger", + "futures 0.3.28", + "gethostname", + "im", + "itertools", + "jito-tip-distribution", + "jito-tip-payment", + "log", + "num-traits", + "rand 0.8.5", + "serde", + "serde_json", + "solana-accounts-db", + "solana-client", + "solana-genesis-utils", + "solana-ledger", + "solana-measure", + "solana-merkle-tree", + "solana-metrics", + "solana-program", + "solana-program-runtime", + "solana-rpc-client-api", + "solana-runtime", + "solana-sdk", + "solana-stake-program", + "solana-transaction-status", + "solana-vote", + "thiserror", + "tokio", +] + [[package]] name = "solana-tokens" version = "1.17.22" @@ -7286,7 +7697,7 @@ dependencies = [ "base64 0.21.4", "bincode", "borsh 0.10.3", - "bs58", + "bs58 0.4.0", "lazy_static", "log", "serde", @@ -7409,6 +7820,7 @@ dependencies = [ "solana-rpc-client", "solana-rpc-client-api", "solana-runtime", + "solana-runtime-plugin", "solana-sdk", "solana-send-transaction-service", "solana-storage-bigtable", @@ -7421,6 +7833,7 @@ dependencies = [ "symlink", "thiserror", "tikv-jemallocator", + "tonic", ] [[package]] @@ -7502,7 +7915,7 @@ dependencies = [ name = "solana-zk-keygen" version = "1.17.22" dependencies = [ - "bs58", + "bs58 0.4.0", "clap 3.2.23", "dirs-next", "num_cpus", @@ -7646,7 +8059,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ - "quote", + "quote 1.0.33", "spl-discriminator-syn", "syn 2.0.38", ] @@ -7657,8 +8070,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e5f2044ca42c8938d54d1255ce599c79a1ffd86b677dfab695caa20f9ffc3f2" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "sha2 0.10.8", "syn 2.0.38", "thiserror", @@ -7715,8 +8128,8 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5269c8e868da17b6552ef35a51355a017bd8e0eae269c201fef830d35fa52c" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "sha2 0.10.8", "syn 2.0.38", ] @@ -7874,9 +8287,9 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", - "proc-macro2", - "quote", + "heck 0.4.0", + "proc-macro2 1.0.69", + "quote 1.0.33", "rustversion", "syn 1.0.109", ] @@ -7893,14 +8306,25 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7973cce6668464ea31f176d85b13c7ab3bba2cb3b77a2ed26abd7801688010a" +[[package]] +name = "syn" +version = "0.15.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "unicode-xid 0.1.0", +] + [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "unicode-ident", ] @@ -7910,8 +8334,8 @@ version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "unicode-ident", ] @@ -7927,10 +8351,10 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", - "unicode-xid", + "unicode-xid 0.2.2", ] [[package]] @@ -8032,8 +8456,8 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 1.0.109", ] @@ -8082,8 +8506,8 @@ checksum = "54c25e2cb8f5fcd7318157634e8838aa6f7e4715c96637f969fabaccd1ef5462" dependencies = [ "cfg-if 1.0.0", "proc-macro-error", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -8094,8 +8518,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37cfd7bbc88a0104e304229fba519bdc45501a30b760fb72240342f1289ad257" dependencies = [ "proc-macro-error", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", "test-case-core", ] @@ -8130,8 +8554,8 @@ version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ - "proc-macro2", - "quote", + 
"proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -8267,8 +8691,8 @@ name = "tokio-macros" version = "2.1.0" source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7bf0e43e4131a5377b3291fce21#7cf47705faacf7bf0e43e4131a5377b3291fce21" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -8394,6 +8818,7 @@ dependencies = [ "percent-encoding 2.3.0", "pin-project", "prost", + "rustls-native-certs", "rustls-pemfile 1.0.0", "tokio", "tokio-rustls", @@ -8402,6 +8827,7 @@ dependencies = [ "tower-layer", "tower-service", "tracing", + "webpki-roots 0.23.1", ] [[package]] @@ -8411,9 +8837,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease 0.1.9", - "proc-macro2", + "proc-macro2 1.0.69", "prost-build", - "quote", + "quote 1.0.33", "syn 1.0.109", ] @@ -8467,8 +8893,8 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -8586,12 +9012,24 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + [[package]] name = "unicode-width" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" + [[package]] name = "unicode-xid" version = "0.2.2" @@ -8673,6 +9111,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7cf7d77f457ef8dfa11e4cd5933c5ddb5dc52a94664071951219a97710f0a32b" +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "valuable" version = "0.1.0" @@ -8764,8 +9208,8 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", "wasm-bindgen-shared", ] @@ -8788,7 +9232,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote", + "quote 1.0.33", "wasm-bindgen-macro-support", ] @@ -8798,8 +9242,8 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -8821,13 +9265,22 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +dependencies = [ + "rustls-webpki 0.100.3", +] + [[package]] name = "webpki-roots" version = "0.24.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" dependencies = [ - "rustls-webpki", + "rustls-webpki 0.101.4", ] [[package]] @@ -8908,6 +9361,15 @@ dependencies = [ "windows-targets 0.48.0", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -8938,6 +9400,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.0", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -8950,6 +9427,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -8962,6 +9445,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -8974,6 +9463,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -8986,6 +9481,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -8998,6 +9499,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -9010,6 +9517,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -9022,6 +9535,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winreg" version = "0.50.0" @@ -9068,6 +9587,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "yasna" version = "0.5.0" @@ -9092,8 +9617,8 @@ version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] @@ -9112,8 +9637,8 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.69", + "quote 1.0.33", "syn 2.0.38", ] diff --git a/Cargo.toml b/Cargo.toml index eea675a6f2..68ebabcb09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ members = [ "bench-tps", "bloom", "bucket_map", + "bundle", "clap-utils", "clap-v3-utils", "cli", @@ -39,6 +40,7 @@ members = [ "geyser-plugin-manager", "gossip", "install", + "jito-protos", "keygen", "ledger", "ledger-tool", @@ -83,6 +85,7 @@ members = [ "rpc-client-nonce-utils", "rpc-test", "runtime", + "runtime-plugin", "runtime/store-tool", "sdk", "sdk/cargo-build-bpf", @@ -100,6 +103,7 @@ members = [ "streamer", "test-validator", "thin-client", + "tip-distributor", "tokens", "tpu-client", "transaction-dos", @@ -116,6 +120,8 @@ members = [ ] exclude = [ + "anchor", + "jito-programs", "programs/sbf", ] @@ -134,6 +140,7 @@ edition = "2021" aes-gcm-siv = "0.10.3" ahash = "=0.8.5" anyhow = "1.0.75" +anchor-lang = { path = "anchor/lang" } ark-bn254 = "0.4.0" ark-ec = "0.4.0" ark-ff = "0.4.0" @@ -221,6 +228,9 @@ Inflector = "0.11.4" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] } js-sys = "0.3.64" +jito-protos = { path = "jito-protos", version = "=1.17.22" } +jito-tip-distribution = { path = "jito-programs/mev-programs/programs/tip-distribution", features = ["no-entrypoint"] } +jito-tip-payment = { path = "jito-programs/mev-programs/programs/tip-payment", features = ["no-entrypoint"] } json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" @@ -309,6 +319,7 @@ solana-bench-tps = { path = "bench-tps", version = "=1.17.22" } solana-bloom = { path = "bloom", version = "=1.17.22" } solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.17.22" } solana-bucket-map = { path = "bucket_map", version = "=1.17.22" } +solana-bundle = { path = "bundle", version = "=1.17.22" } solana-connection-cache = { path = 
"connection-cache", version = "=1.17.22", default-features = false } solana-clap-utils = { path = "clap-utils", version = "=1.17.22" } solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.17.22" } @@ -353,6 +364,7 @@ solana-rpc-client = { path = "rpc-client", version = "=1.17.22", default-feature solana-rpc-client-api = { path = "rpc-client-api", version = "=1.17.22" } solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=1.17.22" } solana-runtime = { path = "runtime", version = "=1.17.22" } +solana-runtime-plugin = { path = "runtime-plugin", version = "=1.17.22" } solana-sdk = { path = "sdk", version = "=1.17.22" } solana-sdk-macro = { path = "sdk/macro", version = "=1.17.22" } solana-send-transaction-service = { path = "send-transaction-service", version = "=1.17.22" } diff --git a/README.md b/README.md index 4fccacf2ba..750e797895 100644 --- a/README.md +++ b/README.md @@ -4,142 +4,9 @@

-[![Solana crate](https://img.shields.io/crates/v/solana-core.svg)](https://crates.io/crates/solana-core)
-[![Solana documentation](https://docs.rs/solana-core/badge.svg)](https://docs.rs/solana-core)
-[![Build status](https://badge.buildkite.com/8cc350de251d61483db98bdfc895b9ea0ac8ffa4a32ee850ed.svg?branch=master)](https://buildkite.com/solana-labs/solana/builds?branch=master)
-[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
+[![Build status](https://badge.buildkite.com/3a7c88c0f777e1a0fddacc190823565271ae4c251ef78d83a8.svg)](https://buildkite.com/jito/jito-solana)

-# Building
+# About
+This repository contains Jito's fork of the Solana validator.
-
-## **1. Install rustc, cargo and rustfmt.**
-
-```bash
-$ curl https://sh.rustup.rs -sSf | sh
-$ source $HOME/.cargo/env
-$ rustup component add rustfmt
-```
-
-When building the master branch, please make sure you are using the latest stable rust version by running:
-
-```bash
-$ rustup update
-```
-
-When building a specific release branch, you should check the rust version in `ci/rust-version.sh` and if necessary, install that version by running:
-```bash
-$ rustup install VERSION
-```
-Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version.
-
-On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, protobuf etc.
-
-On Ubuntu:
-```bash
-$ sudo apt-get update
-$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang cmake make libprotobuf-dev protobuf-compiler
-```
-
-On Fedora:
-```bash
-$ sudo dnf install openssl-devel systemd-devel pkg-config zlib-devel llvm clang cmake make protobuf-devel protobuf-compiler perl-core
-```
-
-## **2. Download the source code.**
-
-```bash
-$ git clone https://github.com/solana-labs/solana.git
-$ cd solana
-```
-
-## **3. Build.**
-
-```bash
-$ ./cargo build
-```
-
-# Testing
-
-**Run the test suite:**
-
-```bash
-$ ./cargo test
-```
-
-### Starting a local testnet
-Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/cluster/bench-tps).
-
-### Accessing the remote development cluster
-* `devnet` - stable public cluster for development accessible via
-devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solana.com/clusters)
-
-# Benchmarking
-
-First, install the nightly build of rustc. `cargo bench` requires the use of the
-unstable features only available in the nightly build.
-
-```bash
-$ rustup install nightly
-```
-
-Run the benchmarks:
-
-```bash
-$ cargo +nightly bench
-```
-
-# Release Process
-
-The release process for this project is described [here](RELEASE.md).
-
-# Code coverage
-
-To generate code coverage statistics:
-
-```bash
-$ scripts/coverage.sh
-$ open target/cov/lcov-local/index.html
-```
-
-Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
-productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
-some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
-the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
-test *protects* your solution from future changes. Say you don't understand why a line of code exists,
-try deleting it and running the unit-tests. The nearest test failure should tell you what problem
-was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
-problem is solved by this code?" On the other hand, if a test does fail and you can think of a
-better way to solve the same problem, a Pull Request with your solution would most certainly be
-welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
-send us that patch!
-
-# Disclaimer
-
-All claims, content, designs, algorithms, estimates, roadmaps,
-specifications, and performance measurements described in this project
-are done with the Solana Labs, Inc. (“SL”) good faith efforts. It is up to
-the reader to check and validate their accuracy and truthfulness.
-Furthermore, nothing in this project constitutes a solicitation for
-investment.
-
-Any content produced by SL or developer resources that SL provides are
-for educational and inspirational purposes only. SL does not encourage,
-induce or sanction the deployment, integration or use of any such
-applications (including the code comprising the Solana blockchain
-protocol) in violation of applicable laws or regulations and hereby
-prohibits any such deployment, integration or use. This includes the use of
-any such applications by the reader (a) in violation of export control
-or sanctions laws of the United States or any other applicable
-jurisdiction, (b) if the reader is located in or ordinarily resident in
-a country or territory subject to comprehensive sanctions administered
-by the U.S. Office of Foreign Assets Control (OFAC), or (c) if the
-reader is or is working on behalf of a Specially Designated National
-(SDN) or a person subject to similar blocking or denied party
-prohibitions.
-
-The reader should be aware that U.S. export control and sanctions laws prohibit
-U.S. persons (and other persons that are subject to such laws) from transacting
-with persons in certain countries and territories or that are on the SDN list.
-Accordingly, there is a risk to individuals that other persons using any of the
-code contained in this repo, or a derivation thereof, may be sanctioned persons
-and that transactions with such persons would be a violation of U.S. export
-controls and sanctions law.
+We recommend checking out our [Gitbook](https://jito-foundation.gitbook.io/mev/jito-solana/building-the-software) for more detailed instructions on building and running Jito-Solana.
diff --git a/SECURITY.md b/SECURITY.md
deleted file mode 100644
index 905316c2dc..0000000000
--- a/SECURITY.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Security Policy
-
-1. [Reporting security problems](#reporting)
-4. [Security Bug Bounties](#bounty)
-2. [Incident Response Process](#process)
-
-
-## Reporting security problems in the Solana Labs Validator Client
-
-**DO NOT CREATE A GITHUB ISSUE** to report a security problem.
-
-Instead please use this [Report a Vulnerability](https://github.com/solana-labs/solana/security/advisories/new) link.
-Provide a helpful title, detailed description of the vulnerability and an exploit
-proof-of-concept. Speculative submissions without proof-of-concept will be closed
-with no further consideration.
-
-If you haven't done so already, please **enable two-factor auth** in your GitHub account.
-
-Expect a response as fast as possible in the advisory, typically within 72 hours.
-
---
-
-If you do not receive a response in the advisory, send an email to
DO NOT -include attachments or provide detail sufficient for exploitation regarding the -security issue in this email. **Only provide such details in the advisory**. - -If you do not receive a response from security@solanalabs.com please followup with -the team directly. You can do this in the `#core-technology` channel of the -[Solana Tech discord server](https://solana.com/discord), by pinging the `Solana Labs` -role in the channel and referencing the fact that you submitted a security problem. - - -## Incident Response Process - -In case an incident is discovered or reported, the following process will be -followed to contain, respond and remediate: - -### 1. Accept the new report -In response a newly reported security problem, a member of the -`solana-labs/admins` group will accept the report to turn it into a draft -advisory. The `solana-labs/security-incident-response` group should be added to -the draft security advisory, and create a private fork of the repository (grey -button towards the bottom of the page) if necessary. - -If the advisory is the result of an audit finding, follow the same process as above but add the auditor's github user(s) and begin the title with "[Audit]". - -If the report is out of scope, a member of the `solana-labs/admins` group will -comment as such and then close the report. - -### 2. Triage -Within the draft security advisory, discuss and determine the severity of the issue. If necessary, members of the solana-labs/security-incident-response group may add other github users to the advisory to assist. -If it is determined that this not a critical network issue then the advisory should be closed and if more follow-up is required a normal Solana public github issue should be created. - -### 3. Prepare Fixes -For the affected branches, typically all three (edge, beta and stable), prepare a fix for the issue and push them to the corresponding branch in the private repository associated with the draft security advisory. -There is no CI available in the private repository so you must build from source and manually verify fixes. -Code review from the reporter is ideal, as well as from multiple members of the core development team. - -### 4. Notify Security Group Validators -Once an ETA is available for the fix, a member of the solana-labs/security-incident-response group should notify the validators so they can prepare for an update using the "Solana Red Alert" notification system. -The teams are all over the world and it's critical to provide actionable information at the right time. Don't be the person that wakes everybody up at 2am when a fix won't be available for hours. - -### 5. Ship the patch -Once the fix is accepted, a member of the solana-labs/security-incident-response group should prepare a single patch file for each affected branch. The commit title for the patch should only contain the advisory id, and not disclose any further details about the incident. -Copy the patches to https://release.solana.com/ under a subdirectory named after the advisory id (example: https://release.solana.com/GHSA-hx59-f5g4-jghh/v1.4.patch). 
Contact a member of the solana-labs/admins group if you require access to release.solana.com -Using the "Solana Red Alert" channel: - a) Notify validators that there's an issue and a patch will be provided in X minutes - b) If X minutes expires and there's no patch, notify of the delay and provide a new ETA - c) Provide links to patches of https://release.solana.com/ for each affected branch -Validators can be expected to build the patch from source against the latest release for the affected branch. -Since the software version will not change after the patch is applied, request that each validator notify in the existing channel once they've updated. Manually monitor the roll out until a sufficient amount of stake has updated - typically at least 33.3% or 66.6% depending on the issue. - -### 6. Public Disclosure and Release -Once the fix has been deployed to the security group validators, the patches from the security advisory may be merged into the main source repository. A new official release for each affected branch should be shipped and all validators requested to upgrade as quickly as possible. - -### 7. Security Advisory Bounty Accounting and Cleanup -If this issue is [eligible](#eligibility) for a bounty, prefix the title of the -security advisory with one of the following, depending on the severity: -- [Bounty Category: Critical: Loss of Funds] -- [Bounty Category: Critical: Consensus / Safety Violations] -- [Bounty Category: Critical: Liveness / Loss of Availability] -- [Bounty Category: Critical: DoS Attacks] -- [Bounty Category: Supply Chain Attacks] -- [Bounty Category: RPC] - -Confirm with the reporter that they agree with the severity assessment, and discuss as required to reach a conclusion. - -We currently do not use the Github workflow to publish security advisories. Once the issue and fix have been disclosed, and a bounty category is assessed if appropriate, the GitHub security advisory is no longer needed and can be closed. - - -## Security Bug Bounties -At its sole discretion, the Solana Foundation may offer a bounty for -[valid reports](#reporting) of critical Solana vulnerabilities. Please see below -for more details. The submitter is not required to provide a -mitigation to qualify. - -#### Loss of Funds: -$2,000,000 USD in locked SOL tokens (locked for 12 months) -* Theft of funds without users signature from any account -* Theft of funds without users interaction in system, token, stake, vote programs -* Theft of funds that requires users signature - creating a vote program that drains the delegated stakes. - -#### Consensus/Safety Violations: -$1,000,000 USD in locked SOL tokens (locked for 12 months) -* Consensus safety violation -* Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc. - -#### Liveness / Loss of Availability: -$400,000 USD in locked SOL tokens (locked for 12 months) -* Whereby consensus halts and requires human intervention -* Eclipse attacks, -* Remote attacks that partition the network, - -#### DoS Attacks: -$100,000 USD in locked SOL tokens (locked for 12 months) -* Remote resource exaustion via Non-RPC protocols - -#### Supply Chain Attacks: -$100,000 USD in locked SOL tokens (locked for 12 months) -* Non-social attacks against source code change management, automated testing, release build, release publication and release hosting infrastructure of the monorepo. 
- -#### RPC DoS/Crashes: -$5,000 USD in locked SOL tokens (locked for 12 months) -* RPC attacks - -### Out of Scope: -The following components are out of scope for the bounty program -* Metrics: `/metrics` in the monorepo as well as https://metrics.solana.com -* Any encrypted credentials, auth tokens, etc. checked into the repo -* Bugs in dependencies. Please take them upstream! -* Attacks that require social engineering -* Any undeveloped automated tooling (scanners, etc) results. (OK with developed PoC) -* Any asset whose source code does not exist in this repository (including, but not limited -to, any and all web properties not explicitly listed on this page) - -### Eligibility: -* Submissions _MUST_ include an exploit proof-of-concept to be considered eligible -* The participant submitting the bug report shall follow the process outlined within this document -* Valid exploits can be eligible even if they are not successfully executed on a public cluster -* Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis -* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.foundation/kyc. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens. - -### Duplicate Reports -Compensation for duplicative reports will be split among reporters with first to report taking priority using the following equation -``` -R: total reports -ri: report priority -bi: bounty share - -bi = 2 ^ (R - ri) / ((2^R) - 1) -``` -#### Bounty Split Examples -| total reports | priority | share | | total reports | priority | share | | total reports | priority | share | -| ------------- | -------- | -----: | - | ------------- | -------- | -----: | - | ------------- | -------- | -----: | -| 1 | 1 | 100% | | 2 | 1 | 66.67% | | 5 | 1 | 51.61% | -| | | | | 2 | 2 | 33.33% | | 5 | 2 | 25.81% | -| 4 | 1 | 53.33% | | | | | | 5 | 3 | 12.90% | -| 4 | 2 | 26.67% | | 3 | 1 | 57.14% | | 5 | 4 | 6.45% | -| 4 | 3 | 13.33% | | 3 | 2 | 28.57% | | 5 | 5 | 3.23% | -| 4 | 4 | 6.67% | | 3 | 3 | 14.29% | | | | | - -### Payment of Bug Bounties: -* Bounties are currently awarded on a rolling/weekly basis and paid out within 30 days upon receipt of an invoice. -* The SOL/USD conversion rate used for payments is the market price of SOL (denominated in USD) at the end of the day the invoice is submitted by the researcher. -* The reference for this price is the Closing Price given by Coingecko.com on that date given here: https://www.coingecko.com/en/coins/solana/historical_data/usd#panel -* Bug bounties that are paid out in SOL are paid to stake accounts with a lockup expiring 12 months from the date of delivery of SOL. 
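The share formula above is a normalized geometric series: the denominator `2^R - 1` is exactly the sum of the numerators `2^(R - ri)` over all R reports, so the shares always total 100%. A quick arithmetic check against the split table, for R = 3:

```
b1 = 2^(3-1) / (2^3 - 1) = 4/7 ≈ 57.14%
b2 = 2^(3-2) / (2^3 - 1) = 2/7 ≈ 28.57%
b3 = 2^(3-3) / (2^3 - 1) = 1/7 ≈ 14.29%
                     sum = 7/7 = 100%
```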
diff --git a/accounts-db/src/account_overrides.rs b/accounts-db/src/account_overrides.rs index ee8e7ec9e2..d5d3286426 100644 --- a/accounts-db/src/account_overrides.rs +++ b/accounts-db/src/account_overrides.rs @@ -4,12 +4,16 @@ use { }; /// Encapsulates overridden accounts, typically used for transaction simulations -#[derive(Default)] +#[derive(Clone, Default)] pub struct AccountOverrides { accounts: HashMap<Pubkey, AccountSharedData>, } impl AccountOverrides { + pub fn upsert_account_overrides(&mut self, other: AccountOverrides) { + self.accounts.extend(other.accounts); + } + pub fn set_account(&mut self, pubkey: &Pubkey, account: Option<AccountSharedData>) { match account { Some(account) => self.accounts.insert(*pubkey, account), diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 47b372d981..3f8a1d677a 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -1173,19 +1173,24 @@ impl Accounts { } fn lock_account( - &self, account_locks: &mut AccountLocks, writable_keys: Vec<&Pubkey>, readonly_keys: Vec<&Pubkey>, + additional_read_locks: &HashSet<Pubkey>, + additional_write_locks: &HashSet<Pubkey>, ) -> Result<()> { for k in writable_keys.iter() { - if account_locks.is_locked_write(k) || account_locks.is_locked_readonly(k) { + if account_locks.is_locked_write(k) + || account_locks.is_locked_readonly(k) + || additional_write_locks.contains(k) + || additional_read_locks.contains(k) + { debug!("Writable account in use: {:?}", k); return Err(TransactionError::AccountInUse); } } for k in readonly_keys.iter() { - if account_locks.is_locked_write(k) { + if account_locks.is_locked_write(k) || additional_write_locks.contains(k) { debug!("Read-only account in use: {:?}", k); return Err(TransactionError::AccountInUse); } @@ -1230,7 +1235,11 @@ impl Accounts { let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs .map(|tx| tx.get_account_locks(tx_account_lock_limit)) .collect(); - self.lock_accounts_inner(tx_account_locks_results) + self.lock_accounts_inner( + tx_account_locks_results, + &HashSet::default(), + &HashSet::default(), + ) } #[must_use] @@ -1240,6 +1249,8 @@ impl Accounts { txs: impl Iterator<Item = &'a SanitizedTransaction>, results: impl Iterator<Item = Result<()>>, tx_account_lock_limit: usize, + additional_read_locks: &HashSet<Pubkey>, + additional_write_locks: &HashSet<Pubkey>, ) -> Vec<Result<()>> { let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs .zip(results) .map(|(tx, result)| match result { Ok(()) => tx.get_account_locks(tx_account_lock_limit), Err(err) => Err(err), }) .collect(); - self.lock_accounts_inner(tx_account_locks_results) + self.lock_accounts_inner( + tx_account_locks_results, + additional_read_locks, + additional_write_locks, + ) } #[must_use] fn lock_accounts_inner( &self, tx_account_locks_results: Vec<Result<TransactionAccountLocks>>, + additional_read_locks: &HashSet<Pubkey>, + additional_write_locks: &HashSet<Pubkey>, ) -> Vec<Result<()>> { let account_locks = &mut self.account_locks.lock().unwrap(); tx_account_locks_results .into_iter() .map(|tx_account_locks_result| match tx_account_locks_result { - Ok(tx_account_locks) => self.lock_account( + Ok(tx_account_locks) => Self::lock_account( account_locks, tx_account_locks.writable, tx_account_locks.readonly, + additional_read_locks, + additional_write_locks, ), Err(err) => Err(err), }) @@ -1313,7 +1332,7 @@ impl Accounts { lamports_per_signature: u64, include_slot_in_hash: IncludeSlotInHash, ) { - let (accounts_to_store, transactions) = self.collect_accounts_to_store( + let (accounts_to_store, transactions) = Self::collect_accounts_to_store( txs, res, loaded, @@ -1340,8 +1359,7 @@ impl Accounts { } #[allow(clippy::too_many_arguments)] - fn collect_accounts_to_store<'a>( - &self, + pub fn collect_accounts_to_store<'a>( txs: &'a [SanitizedTransaction], execution_results: &'a [TransactionExecutionResult], load_results: &'a mut [TransactionLoadResult], @@ -1410,6 +1428,55 @@ impl Accounts { } (accounts, transactions) } + + #[must_use] + fn lock_accounts_sequential_inner( + &self, + tx_account_locks_results: Vec<Result<TransactionAccountLocks>>, + ) -> Vec<Result<()>> { + let mut l_account_locks = self.account_locks.lock().unwrap(); + Self::lock_accounts_sequential(&mut l_account_locks, tx_account_locks_results) + } + + pub fn lock_accounts_sequential( + account_locks: &mut AccountLocks, + tx_account_locks_results: Vec<Result<TransactionAccountLocks>>, + ) -> Vec<Result<()>> { + let mut account_in_use_set = false; + tx_account_locks_results + .into_iter() + .map(|tx_account_locks_result| match tx_account_locks_result { + Ok(tx_account_locks) => match account_in_use_set { + true => Err(TransactionError::AccountInUse), + false => { + let locked = Self::lock_account( + account_locks, + tx_account_locks.writable, + tx_account_locks.readonly, + &HashSet::default(), + &HashSet::default(), + ); + if matches!(locked, Err(TransactionError::AccountInUse)) { + account_in_use_set = true; + } + locked + } + }, + Err(err) => Err(err), + }) + .collect() + } + + pub fn lock_accounts_sequential_with_results<'a>( + &self, + txs: impl Iterator<Item = &'a SanitizedTransaction>, + tx_account_lock_limit: usize, + ) -> Vec<Result<()>> { + let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs + .map(|tx| tx.get_account_locks(tx_account_lock_limit)) + .collect(); + self.lock_accounts_sequential_inner(tx_account_locks_results) + } } fn prepare_if_nonce_account( @@ -1498,6 +1565,7 @@ mod tests { sync::atomic::{AtomicBool, AtomicU64, Ordering}, thread, time, }, + Accounts, }; fn new_sanitized_tx( @@ -3171,6 +3239,8 @@ mod tests { txs.iter(), qos_results.into_iter(), MAX_TX_ACCOUNT_LOCKS, + &HashSet::default(), + &HashSet::default(), ); assert!(results[0].is_ok()); // Read-only account (keypair0) can be referenced multiple times @@ -3292,7 +3362,7 @@ mod tests { } let txs = vec![tx0.clone(), tx1.clone()]; let execution_results = vec![new_execution_result(Ok(()), None); 2]; - let (collected_accounts, transactions) = accounts.collect_accounts_to_store( + let (collected_accounts, transactions) = Accounts::collect_accounts_to_store( &txs, &execution_results, loaded.as_mut_slice(), @@ -3756,7 +3826,7 @@ mod tests { let mut loaded = vec![loaded]; let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts = Accounts::new_with_config_for_tests( + let _accounts = Accounts::new_with_config_for_tests( Vec::new(), &ClusterType::Development, AccountSecondaryIndexes::default(), @@ -3770,7 +3840,7 @@ mod tests { )), nonce.as_ref(), )]; - let (collected_accounts, _) = accounts.collect_accounts_to_store( + let (collected_accounts, _) = Accounts::collect_accounts_to_store( &txs, &execution_results, loaded.as_mut_slice(), @@ -3869,7 +3939,7 @@ mod tests { let mut loaded = vec![loaded]; let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts = Accounts::new_with_config_for_tests( + let _accounts = Accounts::new_with_config_for_tests( Vec::new(), &ClusterType::Development, AccountSecondaryIndexes::default(), @@ -3883,7 +3953,7 @@ mod tests { )), nonce.as_ref(), )]; - let (collected_accounts, _) = accounts.collect_accounts_to_store( + let (collected_accounts, _) = Accounts::collect_accounts_to_store( &txs, &execution_results, loaded.as_mut_slice(), diff --git a/anchor b/anchor new file mode 160000 index 0000000000..4f52f41cbe --- /dev/null +++ b/anchor @@ -0,0 +1 @@ +Subproject commit 4f52f41cbeafb77d85c7b712516dfbeb5b86dd5f diff --git
a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 524928aee7..80418326f5 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -9,6 +9,7 @@ use { solana_core::{ banking_stage::BankingStage, banking_trace::{BankingPacketBatch, BankingTracer, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT}, + bundle_stage::bundle_account_locker::BundleAccountLocker, }, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ @@ -36,6 +37,7 @@ use { solana_streamer::socket::SocketAddrSpace, solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE, std::{ + collections::HashSet, sync::{atomic::Ordering, Arc, RwLock}, thread::sleep, time::{Duration, Instant}, @@ -57,9 +59,15 @@ fn check_txs( let now = Instant::now(); let mut no_bank = false; loop { - if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::from_millis(10)) + if let Ok(WorkingBankEntry { + bank: _, + entries_ticks, + }) = receiver.recv_timeout(Duration::from_millis(10)) { - total += entry.transactions.len(); + total += entries_ticks + .iter() + .map(|e| e.0.transactions.len()) + .sum::<usize>(); } if total >= ref_tx_count { break; } @@ -463,6 +471,8 @@ fn main() { Arc::new(connection_cache), bank_forks.clone(), &Arc::new(PrioritizationFeeCache::new(0u64)), + HashSet::default(), + BundleAccountLocker::default(), ); // This is so that the signal_receiver does not go out of scope after the closure. diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml index 1404d88b5c..94f2531cec 100644 --- a/banks-server/Cargo.toml +++ b/banks-server/Cargo.toml @@ -16,6 +16,7 @@ futures = { workspace = true } solana-accounts-db = { workspace = true } solana-banks-interface = { workspace = true } solana-client = { workspace = true } +solana-gossip = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index a04d542108..e9e09ee6a4 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -9,6 +9,7 @@ use { TransactionSimulationDetails, TransactionStatus, }, solana_client::connection_cache::ConnectionCache, + solana_gossip::cluster_info::ClusterInfo, solana_runtime::{ bank::{Bank, TransactionSimulationResult}, bank_forks::BankForks, @@ -438,7 +439,7 @@ pub async fn start_local_server( pub async fn start_tcp_server( listen_addr: SocketAddr, - tpu_addr: SocketAddr, + cluster_info: Arc<ClusterInfo>, bank_forks: Arc<RwLock<BankForks>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, connection_cache: Arc<ConnectionCache>, @@ -463,7 +464,7 @@ pub async fn start_tcp_server( let (sender, receiver) = unbounded(); SendTransactionService::new::<NullTpuInfo>( - tpu_addr, + cluster_info.clone(), &bank_forks, None, receiver, diff --git a/bootstrap b/bootstrap new file mode 100755 index 0000000000..d9b1eed6f4 --- /dev/null +++ b/bootstrap @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +set -eu + +BANK_HASH=$(cargo run --release --bin solana-ledger-tool -- -l config/bootstrap-validator bank-hash) + +# increase max file handle limit +ulimit -Hn 1000000 + +# if above fails, run: +# sudo bash -c 'echo "* hard nofile 1000000" >> /etc/security/limits.conf' + +# NOTE: make sure the tip-payment and tip-distribution programs are deployed using the correct pubkeys +RUST_LOG=INFO,solana_core::bundle_stage=DEBUG \ NDEBUG=1 ./multinode-demo/bootstrap-validator.sh \ --wait-for-supermajority 0 \ --expected-bank-hash "$BANK_HASH" \ --block-engine-url http://127.0.0.1 \ --relayer-url http://127.0.0.1:11226 \
--rpc-pubsub-enable-block-subscription \ + --enable-rpc-transaction-history \ + --tip-payment-program-pubkey T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt \ + --tip-distribution-program-pubkey 4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7 \ + --commission-bps 0 \ + --shred-receiver-address 127.0.0.1:1002 \ + --trust-relayer-packets \ + --trust-block-engine-packets diff --git a/bundle/Cargo.toml b/bundle/Cargo.toml new file mode 100644 index 0000000000..babb13bcd7 --- /dev/null +++ b/bundle/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "solana-bundle" +description = "Library related to handling bundles" +documentation = "https://docs.rs/solana-bundle" +readme = "../README.md" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +anchor-lang = { workspace = true } +itertools = { workspace = true } +log = { workspace = true } +serde = { workspace = true } +solana-accounts-db = { workspace = true } +solana-ledger = { workspace = true } +solana-logger = { workspace = true } +solana-measure = { workspace = true } +solana-poh = { workspace = true } +solana-program-runtime = { workspace = true } +solana-runtime = { workspace = true } +solana-sdk = { workspace = true } +solana-transaction-status = { workspace = true } +thiserror = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } +solana-logger = { workspace = true } + +[lib] +crate-type = ["lib"] +name = "solana_bundle" diff --git a/bundle/src/bundle_execution.rs b/bundle/src/bundle_execution.rs new file mode 100644 index 0000000000..9b438fa713 --- /dev/null +++ b/bundle/src/bundle_execution.rs @@ -0,0 +1,1187 @@ +use { + itertools::izip, + log::*, + solana_accounts_db::{ + account_overrides::AccountOverrides, accounts::TransactionLoadResult, + transaction_results::TransactionExecutionResult, + }, + solana_ledger::token_balances::collect_token_balances, + solana_measure::{measure::Measure, measure_us}, + solana_program_runtime::timings::ExecuteTimings, + solana_runtime::{ + bank::{Bank, LoadAndExecuteTransactionsOutput, TransactionBalances}, + transaction_batch::TransactionBatch, + }, + solana_sdk::{ + account::AccountSharedData, + bundle::SanitizedBundle, + pubkey::Pubkey, + saturating_add_assign, + signature::Signature, + transaction::{SanitizedTransaction, TransactionError, VersionedTransaction}, + }, + solana_transaction_status::{token_balances::TransactionTokenBalances, PreBalanceInfo}, + std::{ + cmp::{max, min}, + time::{Duration, Instant}, + }, + thiserror::Error, +}; + +#[derive(Clone, Default)] +pub struct BundleExecutionMetrics { + pub num_retries: u64, + pub collect_balances_us: u64, + pub load_execute_us: u64, + pub collect_pre_post_accounts_us: u64, + pub cache_accounts_us: u64, + pub execute_timings: ExecuteTimings, +} + +/// Contains the results from executing each TransactionBatch with a final result associated with it +/// Note that if !result.is_ok(), bundle_transaction_results will not contain the output for every transaction. 
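+/// A minimal consumption sketch (hypothetical caller code, not part of this change):
+/// ```ignore
+/// let output = load_and_execute_bundle(/* ... */);
+/// if output.executed_ok() {
+///     for batch in output.bundle_transaction_results() {
+///         let _results = batch.execution_results();
+///     }
+/// } else {
+///     // partial output: do not assume an entry for every transaction
+/// }
+/// ```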
+pub struct LoadAndExecuteBundleOutput<'a> { + bundle_transaction_results: Vec<BundleTransactionsOutput<'a>>, + result: LoadAndExecuteBundleResult<()>, + metrics: BundleExecutionMetrics, +} + +impl<'a> LoadAndExecuteBundleOutput<'a> { + pub fn executed_ok(&self) -> bool { + self.result.is_ok() + } + + pub fn result(&self) -> &LoadAndExecuteBundleResult<()> { + &self.result + } + + pub fn bundle_transaction_results_mut(&mut self) -> &'a mut [BundleTransactionsOutput] { + &mut self.bundle_transaction_results + } + + pub fn bundle_transaction_results(&self) -> &'a [BundleTransactionsOutput] { + &self.bundle_transaction_results + } + + pub fn executed_transaction_batches(&self) -> Vec<Vec<VersionedTransaction>> { + self.bundle_transaction_results + .iter() + .map(|br| br.executed_versioned_transactions()) + .collect() + } + + pub fn metrics(&self) -> BundleExecutionMetrics { + self.metrics.clone() + } +} + +#[derive(Clone, Debug, Error)] +pub enum LoadAndExecuteBundleError { + #[error("Bundle execution timed out")] + ProcessingTimeExceeded(Duration), + + #[error( + "A transaction in the bundle encountered a lock error: [signature={:?}, transaction_error={:?}]", + signature, + transaction_error + )] + LockError { + signature: Signature, + transaction_error: TransactionError, + }, + + #[error( + "A transaction in the bundle failed to execute: [signature={:?}, execution_result={:?}]", + signature, + execution_result + )] + TransactionError { + signature: Signature, + // Box reduces the size between variants in the Error + execution_result: Box<TransactionExecutionResult>, + }, + + #[error("Invalid pre or post accounts")] + InvalidPreOrPostAccounts, +} + +pub struct BundleTransactionsOutput<'a> { + transactions: &'a [SanitizedTransaction], + load_and_execute_transactions_output: LoadAndExecuteTransactionsOutput, + pre_balance_info: PreBalanceInfo, + post_balance_info: (TransactionBalances, TransactionTokenBalances), + // the length of the outer vector should be the same as transactions.len() + // for indices that didn't get executed, expect a None.
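+    // e.g. for a batch of three transactions where only transactions[0] executed, this
+    // might look like [Some(vec![(pubkey, account_shared_data)]), None, None]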
+ pre_tx_execution_accounts: Vec<Option<Vec<(Pubkey, AccountSharedData)>>>, + post_tx_execution_accounts: Vec<Option<Vec<(Pubkey, AccountSharedData)>>>, +} + +impl<'a> BundleTransactionsOutput<'a> { + pub fn executed_versioned_transactions(&self) -> Vec<VersionedTransaction> { + self.transactions + .iter() + .zip( + self.load_and_execute_transactions_output + .execution_results + .iter(), + ) + .filter_map(|(tx, exec_result)| { + exec_result + .was_executed() + .then_some(tx.to_versioned_transaction()) + }) + .collect() + } + + pub fn executed_transactions(&self) -> Vec<&'a SanitizedTransaction> { + self.transactions + .iter() + .zip( + self.load_and_execute_transactions_output + .execution_results + .iter(), + ) + .filter_map(|(tx, exec_result)| exec_result.was_executed().then_some(tx)) + .collect() + } + + pub fn load_and_execute_transactions_output(&self) -> &LoadAndExecuteTransactionsOutput { + &self.load_and_execute_transactions_output + } + + pub fn transactions(&self) -> &[SanitizedTransaction] { + self.transactions + } + + pub fn loaded_transactions_mut(&mut self) -> &mut [TransactionLoadResult] { + &mut self + .load_and_execute_transactions_output + .loaded_transactions + } + + pub fn execution_results(&self) -> &[TransactionExecutionResult] { + &self.load_and_execute_transactions_output.execution_results + } + + pub fn pre_balance_info(&mut self) -> &mut PreBalanceInfo { + &mut self.pre_balance_info + } + + pub fn post_balance_info(&self) -> &(TransactionBalances, TransactionTokenBalances) { + &self.post_balance_info + } + + pub fn pre_tx_execution_accounts(&self) -> &Vec<Option<Vec<(Pubkey, AccountSharedData)>>> { + &self.pre_tx_execution_accounts + } + + pub fn post_tx_execution_accounts(&self) -> &Vec<Option<Vec<(Pubkey, AccountSharedData)>>> { + &self.post_tx_execution_accounts + } +} + +pub type LoadAndExecuteBundleResult<T> = Result<T, LoadAndExecuteBundleError>; + +/// Return an Error if a transaction was executed and reverted +/// NOTE: `execution_results` are zipped with `sanitized_txs` so it's expected a sanitized tx at +/// position i has a corresponding execution result at position i within the `execution_results` +/// slice +pub fn check_bundle_execution_results<'a>( + execution_results: &'a [TransactionExecutionResult], + sanitized_txs: &'a [SanitizedTransaction], +) -> Result<(), (&'a SanitizedTransaction, &'a TransactionExecutionResult)> { + for (exec_results, sanitized_tx) in execution_results.iter().zip(sanitized_txs) { + match exec_results { + TransactionExecutionResult::Executed { details, .. } => { + if details.status.is_err() { + return Err((sanitized_tx, exec_results)); + } + } + TransactionExecutionResult::NotExecuted(e) => { + if !matches!(e, TransactionError::AccountInUse) { + return Err((sanitized_tx, exec_results)); + } + } + } + } + Ok(()) +} + +/// Executing a bundle is somewhat complicated compared to executing single transactions. In order to +/// avoid duplicate logic for execution and simulation, this function can be leveraged. +/// +/// Assumptions for the caller: +/// - all transactions were signed properly +/// - user has deduplicated transactions inside the bundle +/// +/// TODO (LB): +/// - given a bundle with 3 transactions that write lock the following accounts: [A, B, C], on failure of B +/// we should add in the BundleTransactionsOutput of A and C and return the error for B.
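+/// A hypothetical pre-flight check for the deduplication assumption (illustrative only,
+/// not part of this change), keyed on transaction signatures:
+/// ```ignore
+/// use std::collections::HashSet;
+/// let mut seen: HashSet<&Signature> = HashSet::new();
+/// assert!(bundle.transactions.iter().all(|tx| seen.insert(tx.signature())));
+/// ```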
+#[allow(clippy::too_many_arguments)] +pub fn load_and_execute_bundle<'a>( + bank: &Bank, + bundle: &'a SanitizedBundle, + // Max blockhash age + max_age: usize, + // Upper bound on execution time for a bundle + max_processing_time: &Duration, + // Execution data logging + enable_cpi_recording: bool, + enable_log_recording: bool, + enable_return_data_recording: bool, + enable_balance_recording: bool, + log_messages_bytes_limit: &Option<usize>, + // simulation will not use the Bank's account locks when building the TransactionBatch + // if simulating on an unfrozen bank, this is helpful to avoid stalling replay and use whatever + // state the accounts are in at the current time + is_simulation: bool, + account_overrides: Option<&mut AccountOverrides>, + // these must be the same length as the bundle's transactions + // allows one to read account state before and after execution of each transaction in the bundle + // will use AccountsOverride + Bank + pre_execution_accounts: &Vec<Option<Vec<Pubkey>>>, + post_execution_accounts: &Vec<Option<Vec<Pubkey>>>, +) -> LoadAndExecuteBundleOutput<'a> { + if pre_execution_accounts.len() != post_execution_accounts.len() + || post_execution_accounts.len() != bundle.transactions.len() + { + return LoadAndExecuteBundleOutput { + bundle_transaction_results: vec![], + result: Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts), + metrics: BundleExecutionMetrics::default(), + }; + } + let mut binding = AccountOverrides::default(); + let account_overrides = account_overrides.unwrap_or(&mut binding); + + let mut chunk_start = 0; + let start_time = Instant::now(); + + let mut bundle_transaction_results = vec![]; + let mut metrics = BundleExecutionMetrics::default(); + + while chunk_start != bundle.transactions.len() { + if start_time.elapsed() > *max_processing_time { + trace!("bundle: {} took too long to execute", bundle.bundle_id); + return LoadAndExecuteBundleOutput { + bundle_transaction_results, + metrics, + result: Err(LoadAndExecuteBundleError::ProcessingTimeExceeded( + start_time.elapsed(), + )), + }; + } + + let chunk_end = min(bundle.transactions.len(), chunk_start.saturating_add(128)); + let chunk = &bundle.transactions[chunk_start..chunk_end]; + + // Note: these batches are dropped after execution and before record/commit, which is atypical + // compared to BankingStage which holds account locks until record + commit to avoid race conditions with + // other BankingStage threads. However, the caller of this method, BundleConsumer, will use BundleAccountLocker + // to hold RW locks across all transactions in a bundle until it's processed.
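+        // Illustrative note: while BundleAccountLocker holds this bundle's accounts, a
+        // competing BankingStage thread touching the same accounts fails to lock with
+        // TransactionError::AccountInUse instead of observing a half-committed bundle.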
+ let batch = if is_simulation { + bank.prepare_sequential_sanitized_batch_with_results_for_simulation(chunk) + } else { + bank.prepare_sequential_sanitized_batch_with_results(chunk) + }; + + debug!( + "bundle: {} batch num locks ok: {}", + bundle.bundle_id, + batch.lock_results().iter().filter(|lr| lr.is_ok()).count() + ); + + // Ensures that bundle lock results only return either: + // Ok(()) | Err(TransactionError::AccountInUse) + // If the error isn't one of those, then error out + if let Some((transaction, lock_failure)) = batch.check_bundle_lock_results() { + debug!( + "bundle: {} lock error; signature: {} error: {}", + bundle.bundle_id, + transaction.signature(), + lock_failure + ); + return LoadAndExecuteBundleOutput { + bundle_transaction_results, + metrics, + result: Err(LoadAndExecuteBundleError::LockError { + signature: *transaction.signature(), + transaction_error: lock_failure.clone(), + }), + }; + } + + let mut pre_balance_info = PreBalanceInfo::default(); + let (_, collect_balances_us) = measure_us!({ + if enable_balance_recording { + pre_balance_info.native = + bank.collect_balances_with_cache(&batch, Some(account_overrides)); + pre_balance_info.token = collect_token_balances( + bank, + &batch, + &mut pre_balance_info.mint_decimals, + Some(account_overrides), + ); + } + }); + saturating_add_assign!(metrics.collect_balances_us, collect_balances_us); + + let end = min( + chunk_start.saturating_add(batch.sanitized_transactions().len()), + pre_execution_accounts.len(), + ); + + let m = Measure::start("accounts"); + let accounts_requested = &pre_execution_accounts[chunk_start..end]; + let pre_tx_execution_accounts = + get_account_transactions(bank, account_overrides, accounts_requested, &batch); + saturating_add_assign!(metrics.collect_pre_post_accounts_us, m.end_as_us()); + + let (mut load_and_execute_transactions_output, load_execute_us) = measure_us!(bank + .load_and_execute_transactions( + &batch, + max_age, + enable_cpi_recording, + enable_log_recording, + enable_return_data_recording, + &mut metrics.execute_timings, + Some(account_overrides), + *log_messages_bytes_limit, + true + )); + debug!( + "bundle id: {} loaded_transactions: {:?}", + bundle.bundle_id, load_and_execute_transactions_output.loaded_transactions + ); + saturating_add_assign!(metrics.load_execute_us, load_execute_us); + + // All transactions within a bundle are expected to be executable + not fail + // If there's any transactions that executed and failed or didn't execute due to + // unexpected failures (not locking related), bail out of bundle execution early. + if let Err((failing_tx, exec_result)) = check_bundle_execution_results( + load_and_execute_transactions_output + .execution_results + .as_slice(), + batch.sanitized_transactions(), + ) { + // TODO (LB): we should try to return partial results here for successful bundles in a parallel batch. + // given a bundle that write locks the following accounts [[A], [B], [C]] + // when B fails, we could return the execution results for A and C, but leave B out. + // however, if we have bundle that write locks accounts [[A_1], [A_2], [B], [C]] and B fails + // we'll get the results for A_1 but not [A_2], [B], [C] due to the way this loop executes. 
+ debug!( + "bundle: {} execution error; signature: {} error: {:?}", + bundle.bundle_id, + failing_tx.signature(), + exec_result + ); + return LoadAndExecuteBundleOutput { + bundle_transaction_results, + metrics, + result: Err(LoadAndExecuteBundleError::TransactionError { + signature: *failing_tx.signature(), + execution_result: Box::new(exec_result.clone()), + }), + }; + } + + // If none of the transactions were executed, most likely an AccountInUse error + // need to retry to ensure that all transactions in the bundle are executed. + if !load_and_execute_transactions_output + .execution_results + .iter() + .any(|r| r.was_executed()) + { + saturating_add_assign!(metrics.num_retries, 1); + debug!( + "bundle: {} no transaction executed, retrying", + bundle.bundle_id + ); + continue; + } + + // Cache accounts so next iterations of loop can load cached state instead of using + // AccountsDB, which will contain stale account state because results aren't committed + // to the bank yet. + // NOTE: Bank::collect_accounts_to_store does not handle any state changes related to + // failed, non-nonce transactions. + let m = Measure::start("cache"); + let accounts = bank.collect_accounts_to_store( + batch.sanitized_transactions(), + &load_and_execute_transactions_output.execution_results, + &mut load_and_execute_transactions_output.loaded_transactions, + ); + for (pubkey, data) in accounts { + account_overrides.set_account(pubkey, Some(data.clone())); + } + saturating_add_assign!(metrics.cache_accounts_us, m.end_as_us()); + + let end = max( + chunk_start.saturating_add(batch.sanitized_transactions().len()), + post_execution_accounts.len(), + ); + + let m = Measure::start("accounts"); + let accounts_requested = &post_execution_accounts[chunk_start..end]; + let post_tx_execution_accounts = + get_account_transactions(bank, account_overrides, accounts_requested, &batch); + saturating_add_assign!(metrics.collect_pre_post_accounts_us, m.end_as_us()); + + let ((post_balances, post_token_balances), collect_balances_us) = + measure_us!(if enable_balance_recording { + let post_balances = + bank.collect_balances_with_cache(&batch, Some(account_overrides)); + let post_token_balances = collect_token_balances( + bank, + &batch, + &mut pre_balance_info.mint_decimals, + Some(account_overrides), + ); + (post_balances, post_token_balances) + } else { + ( + TransactionBalances::default(), + TransactionTokenBalances::default(), + ) + }); + saturating_add_assign!(metrics.collect_balances_us, collect_balances_us); + + let processing_end = batch.lock_results().iter().position(|lr| lr.is_err()); + if let Some(end) = processing_end { + chunk_start = chunk_start.saturating_add(end); + } else { + chunk_start = chunk_end; + } + + bundle_transaction_results.push(BundleTransactionsOutput { + transactions: chunk, + load_and_execute_transactions_output, + pre_balance_info, + post_balance_info: (post_balances, post_token_balances), + pre_tx_execution_accounts, + post_tx_execution_accounts, + }); + } + + LoadAndExecuteBundleOutput { + bundle_transaction_results, + metrics, + result: Ok(()), + } +} + +fn get_account_transactions( + bank: &Bank, + account_overrides: &AccountOverrides, + accounts: &[Option>], + batch: &TransactionBatch, +) -> Vec>> { + let iter = izip!(batch.lock_results().iter(), accounts.iter()); + + iter.map(|(lock_result, accounts_requested)| { + if lock_result.is_ok() { + accounts_requested.as_ref().map(|accounts_requested| { + accounts_requested + .iter() + .map(|a| match account_overrides.get(a) { + None => (*a, 
bank.get_account(a).unwrap_or_default()), + Some(data) => (*a, data.clone()), + }) + .collect() + }) + } else { + None + } + }) + .collect() +} + +#[cfg(test)] +mod tests { + use { + crate::bundle_execution::{load_and_execute_bundle, LoadAndExecuteBundleError}, + assert_matches::assert_matches, + solana_ledger::genesis_utils::create_genesis_config, + solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo}, + solana_sdk::{ + bundle::{derive_bundle_id_from_sanitized_transactions, SanitizedBundle}, + clock::MAX_PROCESSING_AGE, + pubkey::Pubkey, + signature::{Keypair, Signer}, + system_transaction::transfer, + transaction::{SanitizedTransaction, Transaction, TransactionError}, + }, + std::{ + sync::{Arc, Barrier}, + thread::{sleep, spawn}, + time::Duration, + }, + }; + + const MAX_PROCESSING_TIME: Duration = Duration::from_secs(1); + const LOG_MESSAGE_BYTES_LIMITS: Option = Some(100_000); + const MINT_AMOUNT_LAMPORTS: u64 = 1_000_000; + + fn create_simple_test_bank(lamports: u64) -> (GenesisConfigInfo, Arc) { + let genesis_config_info = create_genesis_config(lamports); + let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); + (genesis_config_info, bank) + } + + fn make_bundle(txs: &[Transaction]) -> SanitizedBundle { + let transactions: Vec<_> = txs + .iter() + .map(|tx| SanitizedTransaction::try_from_legacy_transaction(tx.clone()).unwrap()) + .collect(); + + let bundle_id = derive_bundle_id_from_sanitized_transactions(&transactions); + + SanitizedBundle { + transactions, + bundle_id, + } + } + + fn find_account_index(tx: &Transaction, account: &Pubkey) -> Option { + tx.message + .account_keys + .iter() + .position(|pubkey| account == pubkey) + } + + /// A single, valid bundle shall execute successfully and return the correct BundleTransactionsOutput content + #[test] + fn test_single_transaction_bundle_success() { + const TRANSFER_AMOUNT: u64 = 1_000; + let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS); + let lamports_per_signature = bank + .get_lamports_per_signature_for_blockhash(&genesis_config_info.genesis_config.hash()) + .unwrap(); + + let kp = Keypair::new(); + let transactions = vec![transfer( + &genesis_config_info.mint_keypair, + &kp.pubkey(), + TRANSFER_AMOUNT, + genesis_config_info.genesis_config.hash(), + )]; + let bundle = make_bundle(&transactions); + let default_accounts = vec![None; bundle.transactions.len()]; + + let execution_result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &MAX_PROCESSING_TIME, + true, + true, + true, + true, + &LOG_MESSAGE_BYTES_LIMITS, + false, + None, + &default_accounts, + &default_accounts, + ); + + // make sure the bundle succeeded + assert!(execution_result.result.is_ok()); + + // check to make sure there was one batch returned with one transaction that was the same that was put in + assert_eq!(execution_result.bundle_transaction_results.len(), 1); + let tx_result = execution_result.bundle_transaction_results.get(0).unwrap(); + assert_eq!(tx_result.transactions.len(), 1); + assert_eq!(tx_result.transactions[0], bundle.transactions[0]); + + // make sure the transaction executed successfully + assert_eq!( + tx_result + .load_and_execute_transactions_output + .execution_results + .len(), + 1 + ); + let execution_result = tx_result + .load_and_execute_transactions_output + .execution_results + .get(0) + .unwrap(); + assert!(execution_result.was_executed()); + assert!(execution_result.was_executed_successfully()); + + // Make sure the post-balances are correct + 
assert_eq!(tx_result.pre_balance_info.native.len(), 1); + let post_tx_sol_balances = tx_result.post_balance_info.0.get(0).unwrap(); + + let minter_message_index = + find_account_index(&transactions[0], &genesis_config_info.mint_keypair.pubkey()) + .unwrap(); + let receiver_message_index = find_account_index(&transactions[0], &kp.pubkey()).unwrap(); + + assert_eq!( + post_tx_sol_balances[minter_message_index], + MINT_AMOUNT_LAMPORTS - lamports_per_signature - TRANSFER_AMOUNT + ); + assert_eq!( + post_tx_sol_balances[receiver_message_index], + TRANSFER_AMOUNT + ); + } + + /// Test a simple failure + #[test] + fn test_single_transaction_bundle_fail() { + const TRANSFER_AMOUNT: u64 = 1_000; + let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS); + + // kp has no funds, transfer will fail + let kp = Keypair::new(); + let transactions = vec![transfer( + &kp, + &kp.pubkey(), + TRANSFER_AMOUNT, + genesis_config_info.genesis_config.hash(), + )]; + let bundle = make_bundle(&transactions); + + let default_accounts = vec![None; bundle.transactions.len()]; + let execution_result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &MAX_PROCESSING_TIME, + true, + true, + true, + true, + &LOG_MESSAGE_BYTES_LIMITS, + false, + None, + &default_accounts, + &default_accounts, + ); + + assert_eq!(execution_result.bundle_transaction_results.len(), 0); + + assert!(execution_result.result.is_err()); + + match execution_result.result.unwrap_err() { + LoadAndExecuteBundleError::ProcessingTimeExceeded(_) + | LoadAndExecuteBundleError::LockError { .. } + | LoadAndExecuteBundleError::InvalidPreOrPostAccounts => { + unreachable!(); + } + LoadAndExecuteBundleError::TransactionError { + signature, + execution_result, + } => { + assert_eq!(signature, *bundle.transactions[0].signature()); + assert!(!execution_result.was_executed()); + } + } + } + + /// Tests a multi-tx bundle that succeeds. 
Checks the returned results + #[test] + fn test_multi_transaction_bundle_success() { + const TRANSFER_AMOUNT_1: u64 = 100_000; + const TRANSFER_AMOUNT_2: u64 = 50_000; + const TRANSFER_AMOUNT_3: u64 = 10_000; + let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS); + let lamports_per_signature = bank + .get_lamports_per_signature_for_blockhash(&genesis_config_info.genesis_config.hash()) + .unwrap(); + + // mint transfers 100k to 1 + // 1 transfers 50k to 2 + // 2 transfers 10k to 3 + // should get executed in 3 batches [[1], [2], [3]] + let kp1 = Keypair::new(); + let kp2 = Keypair::new(); + let kp3 = Keypair::new(); + let transactions = vec![ + transfer( + &genesis_config_info.mint_keypair, + &kp1.pubkey(), + TRANSFER_AMOUNT_1, + genesis_config_info.genesis_config.hash(), + ), + transfer( + &kp1, + &kp2.pubkey(), + TRANSFER_AMOUNT_2, + genesis_config_info.genesis_config.hash(), + ), + transfer( + &kp2, + &kp3.pubkey(), + TRANSFER_AMOUNT_3, + genesis_config_info.genesis_config.hash(), + ), + ]; + let bundle = make_bundle(&transactions); + + let default_accounts = vec![None; bundle.transactions.len()]; + let execution_result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &MAX_PROCESSING_TIME, + true, + true, + true, + true, + &LOG_MESSAGE_BYTES_LIMITS, + false, + None, + &default_accounts, + &default_accounts, + ); + + assert!(execution_result.result.is_ok()); + assert_eq!(execution_result.bundle_transaction_results.len(), 3); + + // first batch contains the first tx that was executed + assert_eq!( + execution_result.bundle_transaction_results[0].transactions, + bundle.transactions + ); + assert_eq!( + execution_result.bundle_transaction_results[0] + .load_and_execute_transactions_output + .execution_results + .len(), + 3 + ); + assert!(execution_result.bundle_transaction_results[0] + .load_and_execute_transactions_output + .execution_results[0] + .was_executed_successfully()); + assert_eq!( + execution_result.bundle_transaction_results[0] + .load_and_execute_transactions_output + .execution_results[1] + .flattened_result(), + Err(TransactionError::AccountInUse) + ); + assert_eq!( + execution_result.bundle_transaction_results[0] + .load_and_execute_transactions_output + .execution_results[2] + .flattened_result(), + Err(TransactionError::AccountInUse) + ); + assert_eq!( + execution_result.bundle_transaction_results[0] + .pre_balance_info + .native + .len(), + 3 + ); + assert_eq!( + execution_result.bundle_transaction_results[0] + .post_balance_info + .0 + .len(), + 3 + ); + + let minter_index = + find_account_index(&transactions[0], &genesis_config_info.mint_keypair.pubkey()) + .unwrap(); + let kp1_index = find_account_index(&transactions[0], &kp1.pubkey()).unwrap(); + + assert_eq!( + execution_result.bundle_transaction_results[0] + .post_balance_info + .0[0][minter_index], + MINT_AMOUNT_LAMPORTS - lamports_per_signature - TRANSFER_AMOUNT_1 + ); + + assert_eq!( + execution_result.bundle_transaction_results[0] + .post_balance_info + .0[0][kp1_index], + TRANSFER_AMOUNT_1 + ); + + // in the second batch, the second transaction was executed + assert_eq!( + execution_result.bundle_transaction_results[1] + .transactions + .to_owned(), + bundle.transactions[1..] 
+ ); + assert_eq!( + execution_result.bundle_transaction_results[1] + .load_and_execute_transactions_output + .execution_results + .len(), + 2 + ); + assert!(execution_result.bundle_transaction_results[1] + .load_and_execute_transactions_output + .execution_results[0] + .was_executed_successfully()); + assert_eq!( + execution_result.bundle_transaction_results[1] + .load_and_execute_transactions_output + .execution_results[1] + .flattened_result(), + Err(TransactionError::AccountInUse) + ); + + assert_eq!( + execution_result.bundle_transaction_results[1] + .pre_balance_info + .native + .len(), + 2 + ); + assert_eq!( + execution_result.bundle_transaction_results[1] + .post_balance_info + .0 + .len(), + 2 + ); + + let kp1_index = find_account_index(&transactions[1], &kp1.pubkey()).unwrap(); + let kp2_index = find_account_index(&transactions[1], &kp2.pubkey()).unwrap(); + + assert_eq!( + execution_result.bundle_transaction_results[1] + .post_balance_info + .0[0][kp1_index], + TRANSFER_AMOUNT_1 - lamports_per_signature - TRANSFER_AMOUNT_2 + ); + + assert_eq!( + execution_result.bundle_transaction_results[1] + .post_balance_info + .0[0][kp2_index], + TRANSFER_AMOUNT_2 + ); + + // in the third batch, the third transaction was executed + assert_eq!( + execution_result.bundle_transaction_results[2] + .transactions + .to_owned(), + bundle.transactions[2..] + ); + assert_eq!( + execution_result.bundle_transaction_results[2] + .load_and_execute_transactions_output + .execution_results + .len(), + 1 + ); + assert!(execution_result.bundle_transaction_results[2] + .load_and_execute_transactions_output + .execution_results[0] + .was_executed_successfully()); + + assert_eq!( + execution_result.bundle_transaction_results[2] + .pre_balance_info + .native + .len(), + 1 + ); + assert_eq!( + execution_result.bundle_transaction_results[2] + .post_balance_info + .0 + .len(), + 1 + ); + + let kp2_index = find_account_index(&transactions[2], &kp2.pubkey()).unwrap(); + let kp3_index = find_account_index(&transactions[2], &kp3.pubkey()).unwrap(); + + assert_eq!( + execution_result.bundle_transaction_results[2] + .post_balance_info + .0[0][kp2_index], + TRANSFER_AMOUNT_2 - lamports_per_signature - TRANSFER_AMOUNT_3 + ); + + assert_eq!( + execution_result.bundle_transaction_results[2] + .post_balance_info + .0[0][kp3_index], + TRANSFER_AMOUNT_3 + ); + } + + /// Tests a multi-tx bundle with the middle transaction failing. + #[test] + fn test_multi_transaction_bundle_fails() { + let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS); + + let kp1 = Keypair::new(); + let kp2 = Keypair::new(); + let kp3 = Keypair::new(); + let transactions = vec![ + transfer( + &genesis_config_info.mint_keypair, + &kp1.pubkey(), + 100_000, + genesis_config_info.genesis_config.hash(), + ), + transfer( + &kp2, + &kp3.pubkey(), + 100_000, + genesis_config_info.genesis_config.hash(), + ), + transfer( + &kp1, + &kp2.pubkey(), + 100_000, + genesis_config_info.genesis_config.hash(), + ), + ]; + let bundle = make_bundle(&transactions); + + let default_accounts = vec![None; bundle.transactions.len()]; + let execution_result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &MAX_PROCESSING_TIME, + true, + true, + true, + true, + &LOG_MESSAGE_BYTES_LIMITS, + false, + None, + &default_accounts, + &default_accounts, + ); + match execution_result.result.as_ref().unwrap_err() { + LoadAndExecuteBundleError::ProcessingTimeExceeded(_) + | LoadAndExecuteBundleError::LockError { .. 
} + | LoadAndExecuteBundleError::InvalidPreOrPostAccounts => { + unreachable!(); + } + + LoadAndExecuteBundleError::TransactionError { + signature, + execution_result: tx_failure, + } => { + assert_eq!(signature, bundle.transactions[1].signature()); + assert_eq!( + tx_failure.flattened_result(), + Err(TransactionError::AccountNotFound) + ); + assert_eq!(execution_result.bundle_transaction_results().len(), 0); + } + } + } + + /// Tests that when the max processing time is exceeded, the bundle is an error + #[test] + fn test_bundle_max_processing_time_exceeded() { + let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS); + + let kp = Keypair::new(); + let transactions = vec![transfer( + &genesis_config_info.mint_keypair, + &kp.pubkey(), + 1, + genesis_config_info.genesis_config.hash(), + )]; + let bundle = make_bundle(&transactions); + + let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer( + &genesis_config_info.mint_keypair, + &kp.pubkey(), + 2, + genesis_config_info.genesis_config.hash(), + ))]; + + // locks it and prevents execution bc write lock on genesis_config_info.mint_keypair + kp.pubkey() held + let _batch = bank.prepare_sanitized_batch(&locked_transfer); + + let default = vec![None; bundle.transactions.len()]; + let result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &Duration::from_millis(100), + false, + false, + false, + false, + &None, + false, + None, + &default, + &default, + ); + assert_matches!( + result.result, + Err(LoadAndExecuteBundleError::ProcessingTimeExceeded(_)) + ); + } + + #[test] + fn test_simulate_bundle_with_locked_account_works() { + let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS); + + let kp = Keypair::new(); + let transactions = vec![transfer( + &genesis_config_info.mint_keypair, + &kp.pubkey(), + 1, + genesis_config_info.genesis_config.hash(), + )]; + let bundle = make_bundle(&transactions); + + let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer( + &genesis_config_info.mint_keypair, + &kp.pubkey(), + 2, + genesis_config_info.genesis_config.hash(), + ))]; + + let _batch = bank.prepare_sanitized_batch(&locked_transfer); + + // simulation ignores account locks so you can simulate bundles on unfrozen banks + let default = vec![None; bundle.transactions.len()]; + let result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &Duration::from_millis(100), + false, + false, + false, + false, + &None, + true, + None, + &default, + &default, + ); + assert!(result.result.is_ok()); + } + + /// Creates a multi-tx bundle and temporarily locks the accounts for one of the transactions in a bundle. 
+ /// Ensures the result is what's expected + #[test] + fn test_bundle_works_with_released_account_locks() { + let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS); + let barrier = Arc::new(Barrier::new(2)); + + let kp = Keypair::new(); + + let transactions = vec![transfer( + &genesis_config_info.mint_keypair, + &kp.pubkey(), + 1, + genesis_config_info.genesis_config.hash(), + )]; + let bundle = Arc::new(make_bundle(&transactions)); + + let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer( + &genesis_config_info.mint_keypair, + &kp.pubkey(), + 2, + genesis_config_info.genesis_config.hash(), + ))]; + + // background thread locks the accounts for a bit then unlocks them + let thread = { + let barrier = barrier.clone(); + let bank = bank.clone(); + spawn(move || { + let batch = bank.prepare_sanitized_batch(&locked_transfer); + barrier.wait(); + sleep(Duration::from_millis(500)); + drop(batch); + }) + }; + + let _ = barrier.wait(); + + // load_and_execute_bundle should spin for a bit then process after the 500ms sleep is over + let default = vec![None; bundle.transactions.len()]; + let result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &Duration::from_secs(2), + false, + false, + false, + false, + &None, + false, + None, + &default, + &default, + ); + assert!(result.result.is_ok()); + + thread.join().unwrap(); + } + + /// Tests that when the max processing time is exceeded, the bundle is an error + #[test] + fn test_bundle_bad_pre_post_accounts() { + let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS); + + let kp = Keypair::new(); + let transactions = vec![transfer( + &genesis_config_info.mint_keypair, + &kp.pubkey(), + 1, + genesis_config_info.genesis_config.hash(), + )]; + let bundle = make_bundle(&transactions); + + let result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &Duration::from_millis(100), + false, + false, + false, + false, + &None, + false, + None, + &vec![None; 2], + &vec![None; bundle.transactions.len()], + ); + assert_matches!( + result.result, + Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts) + ); + + let result = load_and_execute_bundle( + &bank, + &bundle, + MAX_PROCESSING_AGE, + &Duration::from_millis(100), + false, + false, + false, + false, + &None, + false, + None, + &vec![None; bundle.transactions.len()], + &vec![None; 2], + ); + assert_matches!( + result.result, + Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts) + ); + } +} diff --git a/bundle/src/lib.rs b/bundle/src/lib.rs new file mode 100644 index 0000000000..a93e0d3d17 --- /dev/null +++ b/bundle/src/lib.rs @@ -0,0 +1,60 @@ +use { + crate::bundle_execution::LoadAndExecuteBundleError, + anchor_lang::error::Error, + serde::{Deserialize, Serialize}, + solana_poh::poh_recorder::PohRecorderError, + solana_sdk::pubkey::Pubkey, + thiserror::Error, +}; + +pub mod bundle_execution; + +#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum TipError { + #[error("account is missing from bank: {0}")] + AccountMissing(Pubkey), + + #[error("Anchor error: {0}")] + AnchorError(String), + + #[error("Lock error")] + LockError, + + #[error("Error executing initialize programs")] + InitializeProgramsError, + + #[error("Error cranking tip programs")] + CrankTipError, +} + +impl From for TipError { + fn from(anchor_err: Error) -> Self { + match anchor_err { + Error::AnchorError(e) => Self::AnchorError(e.error_msg), + Error::ProgramError(e) => 
Self::AnchorError(e.to_string()), + } + } +} + +pub type BundleExecutionResult = Result; + +#[derive(Error, Debug, Clone)] +pub enum BundleExecutionError { + #[error("The bank has hit the max allotted time for processing transactions")] + BankProcessingTimeLimitReached, + + #[error("The bundle exceeds the cost model")] + ExceedsCostModel, + + #[error("Runtime error while executing the bundle: {0}")] + TransactionFailure(#[from] LoadAndExecuteBundleError), + + #[error("Error locking bundle because a transaction is malformed")] + LockError, + + #[error("PoH record error: {0}")] + PohRecordError(#[from] PohRecorderError), + + #[error("Tip payment error {0}")] + TipError(#[from] TipError), +} diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh index 113b009aa4..98c35ba157 100755 --- a/ci/buildkite-pipeline-in-disk.sh +++ b/ci/buildkite-pipeline-in-disk.sh @@ -289,7 +289,7 @@ if [[ -n $BUILDKITE_TAG ]]; then start_pipeline "Tag pipeline for $BUILDKITE_TAG" annotate --style info --context release-tag \ - "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" + "https://github.com/jito-foundation/jito-solana/releases/$BUILDKITE_TAG" # Jump directly to the secondary build to publish release artifacts quickly trigger_secondary_step @@ -307,7 +307,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/jito-foundation/jito-solana/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 8535905bfe..c3aaffd9f8 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -313,7 +313,7 @@ if [[ -n $BUILDKITE_TAG ]]; then start_pipeline "Tag pipeline for $BUILDKITE_TAG" annotate --style info --context release-tag \ - "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" + "https://github.com/jito-foundation/jito-solana/releases/$BUILDKITE_TAG" # Jump directly to the secondary build to publish release artifacts quickly trigger_secondary_step @@ -331,7 +331,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/jito-foundation/jito-solana/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/buildkite-secondary.yml b/ci/buildkite-secondary.yml index c8bf7b4fd9..48aa4d95f4 100644 --- a/ci/buildkite-secondary.yml +++ b/ci/buildkite-secondary.yml @@ -18,34 +18,34 @@ steps: agents: queue: "release-build" timeout_in_minutes: 5 - - wait - - name: "publish docker" - command: "sdk/docker-solana/build.sh" - agents: - queue: "release-build" - timeout_in_minutes: 60 - - name: "publish crate" - command: "ci/publish-crate.sh" - agents: - queue: "release-build" - retry: - manual: - permit_on_passed: true - timeout_in_minutes: 240 - branches: "!master" - - name: "publish tarball (aarch64-apple-darwin)" - command: "ci/publish-tarball.sh" - agents: - queue: "release-build-aarch64-apple-darwin" - retry: - manual: - permit_on_passed: true - timeout_in_minutes: 60 - - name: "publish tarball (x86_64-apple-darwin)" - 
command: "ci/publish-tarball.sh" - agents: - queue: "release-build-x86_64-apple-darwin" - retry: - manual: - permit_on_passed: true - timeout_in_minutes: 60 +# - wait +# - name: "publish docker" +# command: "sdk/docker-solana/build.sh" +# agents: +# queue: "release-build" +# timeout_in_minutes: 60 +# - name: "publish crate" +# command: "ci/publish-crate.sh" +# agents: +# queue: "release-build" +# retry: +# manual: +# permit_on_passed: true +# timeout_in_minutes: 240 +# branches: "!master" +# - name: "publish tarball (aarch64-apple-darwin)" +# command: "ci/publish-tarball.sh" +# agents: +# queue: "release-build-aarch64-apple-darwin" +# retry: +# manual: +# permit_on_passed: true +# timeout_in_minutes: 60 +# - name: "publish tarball (x86_64-apple-darwin)" +# command: "ci/publish-tarball.sh" +# agents: +# queue: "release-build-x86_64-apple-darwin" +# retry: +# manual: +# permit_on_passed: true +# timeout_in_minutes: 60 diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index ede70e6229..d409667764 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -269,7 +269,7 @@ pull_or_push_steps() { # start_pipeline "Tag pipeline for $BUILDKITE_TAG" # annotate --style info --context release-tag \ -# "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" +# "https://github.com/jito-foundation/jito-solana/releases/$BUILDKITE_TAG" # # Jump directly to the secondary build to publish release artifacts quickly # trigger_secondary_step @@ -287,7 +287,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/jito-foundation/jito-solana/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/channel-info.sh b/ci/channel-info.sh index c82806454d..101583307f 100755 --- a/ci/channel-info.sh +++ b/ci/channel-info.sh @@ -11,7 +11,7 @@ here="$(dirname "$0")" # shellcheck source=ci/semver_bash/semver.sh source "$here"/semver_bash/semver.sh -remote=https://github.com/solana-labs/solana.git +remote=https://github.com/jito-foundation/jito-solana.git # Fetch all vX.Y.Z tags # diff --git a/ci/check-crates.sh b/ci/check-crates.sh index 655504ea11..d6a9ad9c39 100755 --- a/ci/check-crates.sh +++ b/ci/check-crates.sh @@ -31,6 +31,9 @@ printf "%s\n" "${files[@]}" error_count=0 for file in "${files[@]}"; do read -r crate_name package_publish workspace < <(toml get "$file" . 
| jq -r '(.package.name | tostring)+" "+(.package.publish | tostring)+" "+(.workspace | tostring)') + if [ "$crate_name" == "solana-bundle" ]; then + continue + fi echo "=== $crate_name ($file) ===" if [[ $package_publish = 'false' ]]; then diff --git a/ci/publish-installer.sh b/ci/publish-installer.sh index 4b5345ae0d..71d8ef6985 100755 --- a/ci/publish-installer.sh +++ b/ci/publish-installer.sh @@ -26,14 +26,14 @@ fi # upload install script source ci/upload-ci-artifact.sh -cat >release.solana.com-install <release.jito.wtf-install <>release.solana.com-install +cat install/solana-install-init.sh >>release.jito.wtf-install echo --- AWS S3 Store: "install" -upload-s3-artifact "/solana/release.solana.com-install" "s3://release.solana.com/$CHANNEL_OR_TAG/install" +upload-s3-artifact "/solana/release.jito.wtf-install" "s3://release.jito.wtf/$CHANNEL_OR_TAG/install" echo Published to: -ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install +ci/format-url.sh https://release.jito.wtf/"$CHANNEL_OR_TAG"/install diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh index ff72bb7da2..ea132a73e1 100755 --- a/ci/publish-tarball.sh +++ b/ci/publish-tarball.sh @@ -119,16 +119,16 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET. if [[ -n $BUILDKITE ]]; then echo --- AWS S3 Store: "$file" - upload-s3-artifact "/solana/$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file" + upload-s3-artifact "/solana/$file" s3://release.jito.wtf/"$CHANNEL_OR_TAG"/"$file" echo Published to: - $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file" + $DRYRUN ci/format-url.sh https://release.jito.wtf/"$CHANNEL_OR_TAG"/"$file" if [[ -n $TAG ]]; then ci/upload-github-release-asset.sh "$file" fi elif [[ -n $TRAVIS ]]; then - # .travis.yml uploads everything in the travis-s3-upload/ directory to release.solana.com + # .travis.yml uploads everything in the travis-s3-upload/ directory to release.jito.wtf mkdir -p travis-s3-upload/"$CHANNEL_OR_TAG" cp -v "$file" travis-s3-upload/"$CHANNEL_OR_TAG"/ diff --git a/ci/upload-github-release-asset.sh b/ci/upload-github-release-asset.sh index ca2ae2a8f6..fb4de1af9e 100755 --- a/ci/upload-github-release-asset.sh +++ b/ci/upload-github-release-asset.sh @@ -26,7 +26,7 @@ fi # Force CI_REPO_SLUG since sometimes # BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the # artifact upload to fail -CI_REPO_SLUG=solana-labs/solana +CI_REPO_SLUG=jito-foundation/jito-solana #if [[ -z $CI_REPO_SLUG ]]; then # echo Error: CI_REPO_SLUG not defined # exit 1 diff --git a/core/Cargo.toml b/core/Cargo.toml index fcab8ff877..6f3e2402ec 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -14,6 +14,7 @@ edition = { workspace = true } codecov = { repository = "solana-labs/solana", branch = "master", service = "github" } [dependencies] +anchor-lang = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } bs58 = { workspace = true } @@ -26,11 +27,16 @@ etcd-client = { workspace = true, features = ["tls"] } futures = { workspace = true } histogram = { workspace = true } itertools = { workspace = true } +jito-protos = { workspace = true } +jito-tip-distribution = { workspace = true } +jito-tip-payment = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } lru = { workspace = true } min-max-heap = { workspace = true } num_enum = { workspace = true } +prost = { workspace = true } +prost-types = { workspace = true } quinn = { workspace = true } rand = { 
workspace = true } rand_chacha = { workspace = true } @@ -43,6 +49,7 @@ serde_bytes = { workspace = true } serde_derive = { workspace = true } solana-accounts-db = { workspace = true } solana-bloom = { workspace = true } +solana-bundle = { workspace = true } solana-client = { workspace = true } solana-cost-model = { workspace = true } solana-entry = { workspace = true } @@ -62,6 +69,7 @@ solana-rayon-threadlimit = { workspace = true } solana-rpc = { workspace = true } solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } +solana-runtime-plugin = { workspace = true } solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } solana-streamer = { workspace = true } @@ -77,6 +85,7 @@ sys-info = { workspace = true } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } +tonic = { workspace = true } trees = { workspace = true } [dev-dependencies] @@ -85,10 +94,13 @@ fs_extra = { workspace = true } raptorq = { workspace = true } serde_json = { workspace = true } serial_test = { workspace = true } +solana-accounts-db = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` +solana-bundle = { workspace = true } solana-core = { path = ".", features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-program-runtime = { workspace = true } +solana-program-test = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-stake-program = { workspace = true } @@ -101,6 +113,7 @@ sysctl = { workspace = true } [build-dependencies] rustc_version = { workspace = true } +tonic-build = { workspace = true } [features] dev-context-only-utils = [] diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index b399abba0a..4b27e523e2 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -22,6 +22,7 @@ use { BankingStage, BankingStageStats, }, banking_trace::{BankingPacketBatch, BankingTracer}, + bundle_stage::bundle_account_locker::BundleAccountLocker, }, solana_entry::entry::{next_hash, Entry}, solana_gossip::cluster_info::{ClusterInfo, Node}, @@ -54,6 +55,7 @@ use { vote_state::VoteStateUpdate, vote_transaction::new_vote_state_update_transaction, }, std::{ + collections::HashSet, iter::repeat_with, sync::{atomic::Ordering, Arc}, time::{Duration, Instant}, @@ -65,8 +67,15 @@ fn check_txs(receiver: &Arc>, ref_tx_count: usize) { let mut total = 0; let now = Instant::now(); loop { - if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::new(1, 0)) { - total += entry.transactions.len(); + if let Ok(WorkingBankEntry { + bank: _, + entries_ticks, + }) = receiver.recv_timeout(Duration::new(1, 0)) + { + total += entries_ticks + .iter() + .map(|e| e.0.transactions.len()) + .sum::(); } if total >= ref_tx_count { break; @@ -109,7 +118,14 @@ fn bench_consume_buffered(bencher: &mut Bencher) { ); let (s, _r) = unbounded(); let committer = Committer::new(None, s, Arc::new(PrioritizationFeeCache::new(0u64))); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); // This tests the performance of buffering packets. // If the packet buffers are copied, performance will be poor. 
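The check_txs change above reflects the new WorkingBankEntry shape: the channel now delivers a bank together with a batch of (entry, tick_height) pairs instead of a single entry. A minimal sketch of the new counting logic with stand-in types (the real Entry and WorkingBankEntry live in solana_entry and solana_poh; the field types here are simplified):

    // Stand-in for solana_entry::entry::Entry.
    struct Entry {
        transactions: Vec<u64>, // stand-in for Vec<VersionedTransaction>
    }

    // Stand-in for the new solana_poh::poh_recorder::WorkingBankEntry.
    struct WorkingBankEntry {
        entries_ticks: Vec<(Entry, u64)>, // (entry, tick_height) pairs
    }

    // Total transactions across everything received, mirroring check_txs.
    fn total_transactions(received: &[WorkingBankEntry]) -> usize {
        received
            .iter()
            .flat_map(|wbe| wbe.entries_ticks.iter())
            .map(|(entry, _tick_height)| entry.transactions.len())
            .sum()
    }

    fn main() {
        let wbe = WorkingBankEntry {
            entries_ticks: vec![
                (Entry { transactions: vec![1, 2] }, 10),
                (Entry { transactions: vec![3] }, 11),
            ],
        };
        assert_eq!(total_transactions(&[wbe]), 3);
    }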
bencher.iter(move || { @@ -305,6 +321,8 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + HashSet::default(), + BundleAccountLocker::default(), ); let chunk_len = verified.len() / CHUNKS; diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs index 928758deb7..0781f9bd3f 100644 --- a/core/benches/consumer.rs +++ b/core/benches/consumer.rs @@ -7,16 +7,16 @@ use { iter::IndexedParallelIterator, prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, }, - solana_core::banking_stage::{ - committer::Committer, consumer::Consumer, qos_service::QosService, + solana_core::{ + banking_stage::{committer::Committer, consumer::Consumer, qos_service::QosService}, + bundle_stage::bundle_account_locker::BundleAccountLocker, }, - solana_entry::entry::Entry, solana_ledger::{ blockstore::Blockstore, genesis_utils::{create_genesis_config, GenesisConfigInfo}, }, solana_poh::{ - poh_recorder::{create_test_recorder, PohRecorder}, + poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry}, poh_service::PohService, }, solana_runtime::bank::Bank, @@ -25,9 +25,12 @@ use { signer::Signer, stake_history::Epoch, system_program, system_transaction, transaction::SanitizedTransaction, }, - std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, RwLock, + std::{ + collections::HashSet, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, }, tempfile::TempDir, test::Bencher, @@ -80,7 +83,14 @@ fn create_consumer(poh_recorder: &RwLock) -> Consumer { let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let committer = Committer::new(None, replay_vote_sender, Arc::default()); let transaction_recorder = poh_recorder.read().unwrap().new_recorder(); - Consumer::new(committer, transaction_recorder, QosService::new(0), None) + Consumer::new( + committer, + transaction_recorder, + QosService::new(0), + None, + HashSet::default(), + BundleAccountLocker::default(), + ) } struct BenchFrame { @@ -89,7 +99,7 @@ struct BenchFrame { exit: Arc, poh_recorder: Arc>, poh_service: PohService, - signal_receiver: Receiver<(Arc, (Entry, u64))>, + signal_receiver: Receiver, } fn setup(apply_cost_tracker_during_replay: bool) -> BenchFrame { diff --git a/core/benches/proto_to_packet.rs b/core/benches/proto_to_packet.rs new file mode 100644 index 0000000000..87f85f9c7f --- /dev/null +++ b/core/benches/proto_to_packet.rs @@ -0,0 +1,56 @@ +#![feature(test)] + +extern crate test; + +use { + jito_protos::proto::packet::{ + Meta as PbMeta, Packet as PbPacket, PacketBatch, PacketFlags as PbFlags, + }, + solana_core::proto_packet_to_packet, + solana_sdk::packet::{Packet, PACKET_DATA_SIZE}, + std::iter::repeat, + test::{black_box, Bencher}, +}; + +fn get_proto_packet(i: u8) -> PbPacket { + PbPacket { + data: repeat(i).take(PACKET_DATA_SIZE).collect(), + meta: Some(PbMeta { + size: PACKET_DATA_SIZE as u64, + addr: "255.255.255.255:65535".to_string(), + port: 65535, + flags: Some(PbFlags { + discard: false, + forwarded: false, + repair: false, + simple_vote_tx: false, + tracer_packet: false, + }), + sender_stake: 0, + }), + } +} + +#[bench] +fn bench_proto_to_packet(bencher: &mut Bencher) { + bencher.iter(|| { + black_box(proto_packet_to_packet(get_proto_packet(1))); + }); +} + +#[bench] +fn bench_batch_list_to_packets(bencher: &mut Bencher) { + let packet_batch = PacketBatch { + packets: (0..128).map(get_proto_packet).collect(), + }; + + bencher.iter(|| { + 
black_box( + packet_batch + .packets + .iter() + .map(|p| proto_packet_to_packet(p.clone())) + .collect::>(), + ); + }); +} diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs index 110e1f5aa4..7373ffd5b3 100644 --- a/core/src/admin_rpc_post_init.rs +++ b/core/src/admin_rpc_post_init.rs @@ -1,10 +1,12 @@ use { + crate::proxy::{block_engine_stage::BlockEngineConfig, relayer_stage::RelayerConfig}, solana_gossip::cluster_info::ClusterInfo, solana_runtime::bank_forks::BankForks, solana_sdk::pubkey::Pubkey, std::{ collections::HashSet, - sync::{Arc, RwLock}, + net::SocketAddr, + sync::{Arc, Mutex, RwLock}, }, }; @@ -14,4 +16,7 @@ pub struct AdminRpcRequestMetadataPostInit { pub bank_forks: Arc>, pub vote_account: Pubkey, pub repair_whitelist: Arc>>, + pub block_engine_config: Arc>, + pub relayer_config: Arc>, + pub shred_receiver_address: Arc>>, } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index ad52db88ac..622c5cfdee 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -16,8 +16,9 @@ use { unprocessed_transaction_storage::{ThreadType, UnprocessedTransactionStorage}, }, crate::{ - banking_trace::BankingPacketReceiver, tracer_packet_stats::TracerPacketStats, - validator::BlockProductionMethod, + banking_trace::BankingPacketReceiver, + bundle_stage::bundle_account_locker::BundleAccountLocker, + tracer_packet_stats::TracerPacketStats, validator::BlockProductionMethod, }, crossbeam_channel::RecvTimeoutError, histogram::Histogram, @@ -28,10 +29,12 @@ use { solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH}, solana_poh::poh_recorder::PohRecorder, solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, - solana_sdk::timing::AtomicInterval, + solana_sdk::{pubkey::Pubkey, timing::AtomicInterval}, solana_vote::vote_sender_types::ReplayVoteSender, std::{ - cmp, env, + cmp, + collections::HashSet, + env, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, RwLock, @@ -50,13 +53,13 @@ pub mod unprocessed_packet_batches; pub mod unprocessed_transaction_storage; mod consume_worker; -mod decision_maker; +pub(crate) mod decision_maker; mod forward_packet_batches_by_accounts; mod forward_worker; mod forwarder; -mod immutable_deserialized_packet; +pub(crate) mod immutable_deserialized_packet; mod latest_unprocessed_votes; -mod leader_slot_timing_metrics; +pub(crate) mod leader_slot_timing_metrics; mod multi_iterator_scanner; mod packet_deserializer; mod packet_receiver; @@ -327,6 +330,8 @@ impl BankingStage { connection_cache: Arc, bank_forks: Arc>, prioritization_fee_cache: &Arc, + blacklisted_accounts: HashSet, + bundle_account_locker: BundleAccountLocker, ) -> Self { Self::new_num_threads( block_production_method, @@ -342,6 +347,8 @@ impl BankingStage { connection_cache, bank_forks, prioritization_fee_cache, + blacklisted_accounts, + bundle_account_locker, ) } @@ -360,6 +367,8 @@ impl BankingStage { connection_cache: Arc, bank_forks: Arc>, prioritization_fee_cache: &Arc, + blacklisted_accounts: HashSet, + bundle_account_locker: BundleAccountLocker, ) -> Self { match block_production_method { BlockProductionMethod::ThreadLocalMultiIterator => { @@ -376,6 +385,8 @@ impl BankingStage { connection_cache, bank_forks, prioritization_fee_cache, + blacklisted_accounts, + bundle_account_locker, ) } } @@ -395,6 +406,8 @@ impl BankingStage { connection_cache: Arc, bank_forks: Arc>, prioritization_fee_cache: &Arc, + blacklisted_accounts: HashSet, + bundle_account_locker: BundleAccountLocker, ) -> 
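The proto_to_packet benchmarks above time the conversion from a protobuf PacketBatch into solana Packets. The following is a hedged sketch of what such a conversion plausibly does (copy bounded data, take the size from the meta when present); it mirrors the PbPacket/PbMeta field names from the bench but is not the crate's actual proto_packet_to_packet:

    /// Max over-the-wire size of a Transaction (real constant in solana_sdk::packet).
    const PACKET_DATA_SIZE: usize = 1232;

    // Simplified stand-ins for the generated protobuf types.
    struct PbMeta { size: u64 }
    struct PbPacket { data: Vec<u8>, meta: Option<PbMeta> }

    // Simplified stand-in for solana_sdk::packet::Packet.
    struct Packet { buffer: [u8; PACKET_DATA_SIZE], size: usize }

    fn proto_packet_to_packet_sketch(p: &PbPacket) -> Packet {
        let mut out = Packet { buffer: [0u8; PACKET_DATA_SIZE], size: 0 };
        // Never copy more than the fixed packet buffer can hold.
        let copy_len = p.data.len().min(PACKET_DATA_SIZE);
        out.buffer[..copy_len].copy_from_slice(&p.data[..copy_len]);
        // Prefer the declared size from the meta, clamped to the buffer.
        out.size = p
            .meta
            .as_ref()
            .map(|m| m.size as usize)
            .unwrap_or(copy_len)
            .min(PACKET_DATA_SIZE);
        out
    }

    fn main() {
        let pb = PbPacket {
            data: vec![7u8; PACKET_DATA_SIZE],
            meta: Some(PbMeta { size: PACKET_DATA_SIZE as u64 }),
        };
        let pkt = proto_packet_to_packet_sketch(&pb);
        assert_eq!(pkt.size, PACKET_DATA_SIZE);
    }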
Self { assert!(num_threads >= MIN_TOTAL_THREADS); // Single thread to generate entries from many banks. @@ -454,6 +467,8 @@ impl BankingStage { poh_recorder.read().unwrap().new_recorder(), QosService::new(id), log_messages_bytes_limit, + blacklisted_accounts.clone(), + bundle_account_locker.clone(), ); Builder::new() @@ -614,7 +629,7 @@ mod tests { crate::banking_trace::{BankingPacketBatch, BankingTracer}, crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, - solana_entry::entry::{Entry, EntrySlice}, + solana_entry::entry::EntrySlice, solana_gossip::cluster_info::Node, solana_ledger::{ blockstore::Blockstore, @@ -628,6 +643,7 @@ mod tests { solana_poh::{ poh_recorder::{ create_test_recorder, PohRecorderError, Record, RecordTransactionsSummary, + WorkingBankEntry, }, poh_service::PohService, }, @@ -702,6 +718,8 @@ mod tests { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + HashSet::default(), + BundleAccountLocker::default(), ); drop(non_vote_sender); drop(tpu_vote_sender); @@ -759,6 +777,8 @@ mod tests { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + HashSet::default(), + BundleAccountLocker::default(), ); trace!("sending bank"); drop(non_vote_sender); @@ -771,7 +791,12 @@ mod tests { trace!("getting entries"); let entries: Vec<_> = entry_receiver .iter() - .map(|(_bank, (entry, _tick_height))| entry) + .flat_map( + |WorkingBankEntry { + bank: _, + entries_ticks, + }| entries_ticks.into_iter().map(|(e, _)| e), + ) .collect(); trace!("done"); assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize); @@ -841,6 +866,8 @@ mod tests { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + HashSet::default(), + BundleAccountLocker::default(), ); // fund another account so we can send 2 good transactions in a single batch. @@ -892,9 +919,14 @@ mod tests { bank.process_transaction(&fund_tx).unwrap(); //receive entries + ticks loop { - let entries: Vec = entry_receiver + let entries: Vec<_> = entry_receiver .iter() - .map(|(_bank, (entry, _tick_height))| entry) + .flat_map( + |WorkingBankEntry { + bank: _, + entries_ticks, + }| entries_ticks.into_iter().map(|(e, _)| e), + ) .collect(); assert!(entries.verify(&blockhash)); @@ -1003,6 +1035,8 @@ mod tests { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + HashSet::default(), + BundleAccountLocker::default(), ); // wait for banking_stage to eat the packets @@ -1021,7 +1055,12 @@ mod tests { // check that the balance is what we expect. 
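Note that each banking thread above receives blacklisted_accounts.clone() and bundle_account_locker.clone(). For the locker, clone() has to share state rather than copy it, so that locks taken by bundle stage are visible to every banking thread. A sketch of those sharing semantics with a hypothetical Arc-backed stand-in (SharedAccountLocks is illustrative, not the diff's BundleAccountLocker):

    use std::{
        collections::HashSet,
        sync::{Arc, Mutex},
        thread,
    };

    // Clone copies the Arc, so all threads observe the same lock set.
    #[derive(Clone, Default)]
    struct SharedAccountLocks {
        write_locks: Arc<Mutex<HashSet<[u8; 32]>>>, // [u8; 32] stands in for Pubkey
    }

    fn main() {
        let locker = SharedAccountLocks::default();
        let handles: Vec<_> = (0..4u8)
            .map(|i| {
                let locker = locker.clone(); // one clone per banking thread
                thread::spawn(move || {
                    locker.write_locks.lock().unwrap().insert([i; 32]);
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        // Every thread's insertion is visible through the original handle.
        assert_eq!(locker.write_locks.lock().unwrap().len(), 4);
    }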
let entries: Vec<_> = entry_receiver .iter() - .map(|(_bank, (entry, _tick_height))| entry) + .flat_map( + |WorkingBankEntry { + bank: _, + entries_ticks, + }| entries_ticks.into_iter().map(|(e, _)| e), + ) .collect(); let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); @@ -1082,15 +1121,19 @@ mod tests { system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()).into(), ]; - let _ = recorder.record_transactions(bank.slot(), txs.clone()); - let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); + let _ = recorder.record_transactions(bank.slot(), vec![txs.clone()]); + let WorkingBankEntry { + bank, + entries_ticks, + } = entry_receiver.recv().unwrap(); + let entry = &entries_ticks.get(0).unwrap().0; assert_eq!(entry.transactions, txs); // Once bank is set to a new bank (setting bank.slot() + 1 in record_transactions), // record_transactions should throw MaxHeightReached let next_slot = bank.slot() + 1; let RecordTransactionsSummary { result, .. } = - recorder.record_transactions(next_slot, txs); + recorder.record_transactions(next_slot, vec![txs]); assert_matches!(result, Err(PohRecorderError::MaxHeightReached)); // Should receive nothing from PohRecorder b/c record failed assert!(entry_receiver.try_recv().is_err()); @@ -1194,6 +1237,8 @@ mod tests { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + HashSet::default(), + BundleAccountLocker::default(), ); let keypairs = (0..100).map(|_| Keypair::new()).collect_vec(); diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index 88b129aae3..fcd27e6e1a 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -15,12 +15,10 @@ use { prioritization_fee_cache::PrioritizationFeeCache, transaction_batch::TransactionBatch, }, - solana_sdk::{hash::Hash, pubkey::Pubkey, saturating_add_assign}, - solana_transaction_status::{ - token_balances::TransactionTokenBalancesSet, TransactionTokenBalance, - }, + solana_sdk::{hash::Hash, saturating_add_assign}, + solana_transaction_status::{token_balances::TransactionTokenBalancesSet, PreBalanceInfo}, solana_vote::vote_sender_types::ReplayVoteSender, - std::{collections::HashMap, sync::Arc}, + std::sync::Arc, }; #[derive(Clone, Debug, PartialEq, Eq)] @@ -29,13 +27,6 @@ pub enum CommitTransactionDetails { NotCommitted, } -#[derive(Default)] -pub(super) struct PreBalanceInfo { - pub native: Vec>, - pub token: Vec>, - pub mint_decimals: HashMap, -} - pub struct Committer { transaction_status_sender: Option, replay_vote_sender: ReplayVoteSender, @@ -143,7 +134,7 @@ impl Committer { let txs = batch.sanitized_transactions().to_vec(); let post_balances = bank.collect_balances(batch); let post_token_balances = - collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals); + collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals, None); let mut transaction_index = starting_transaction_index.unwrap_or_default(); let batch_transaction_indexes: Vec<_> = tx_results .execution_results diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index d2451efa1c..3abdee1585 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -126,11 +126,14 @@ fn try_drain_iter(work: T, receiver: &Receiver) -> impl Iterator mod tests { use { super::*, - crate::banking_stage::{ - committer::Committer, - qos_service::QosService, - 
scheduler_messages::{TransactionBatchId, TransactionId}, - tests::{create_slow_genesis_config, sanitize_transactions, simulate_poh}, + crate::{ + banking_stage::{ + committer::Committer, + qos_service::QosService, + scheduler_messages::{TransactionBatchId, TransactionId}, + tests::{create_slow_genesis_config, sanitize_transactions, simulate_poh}, + }, + bundle_stage::bundle_account_locker::BundleAccountLocker, }, crossbeam_channel::unbounded, solana_ledger::{ @@ -145,6 +148,7 @@ mod tests { }, solana_vote::vote_sender_types::ReplayVoteReceiver, std::{ + collections::HashSet, sync::{atomic::AtomicBool, RwLock}, thread::JoinHandle, }, @@ -202,7 +206,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); let (consume_sender, consume_receiver) = unbounded(); let (consumed_sender, consumed_receiver) = unbounded(); diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index c8624a96aa..134d7096a5 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -1,6 +1,6 @@ use { super::{ - committer::{CommitTransactionDetails, Committer, PreBalanceInfo}, + committer::{CommitTransactionDetails, Committer}, immutable_deserialized_packet::ImmutableDeserializedPacket, leader_slot_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary}, leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, @@ -8,6 +8,7 @@ use { unprocessed_transaction_storage::{ConsumeScannerPayload, UnprocessedTransactionStorage}, BankingStageStats, }, + crate::bundle_stage::bundle_account_locker::BundleAccountLocker, itertools::Itertools, solana_accounts_db::{ transaction_error_metrics::TransactionErrorMetrics, @@ -19,18 +20,21 @@ use { BankStart, PohRecorderError, RecordTransactionsSummary, RecordTransactionsTimings, TransactionRecorder, }, - solana_program_runtime::timings::ExecuteTimings, solana_runtime::{ bank::{Bank, LoadAndExecuteTransactionsOutput}, transaction_batch::TransactionBatch, }, solana_sdk::{ clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, - feature_set, saturating_add_assign, + feature_set, + pubkey::Pubkey, + saturating_add_assign, timing::timestamp, transaction::{self, AddressLoader, SanitizedTransaction, TransactionError}, }, + solana_transaction_status::PreBalanceInfo, std::{ + collections::HashSet, sync::{atomic::Ordering, Arc}, time::Instant, }, @@ -71,6 +75,8 @@ pub struct Consumer { transaction_recorder: TransactionRecorder, qos_service: QosService, log_messages_bytes_limit: Option, + blacklisted_accounts: HashSet, + bundle_account_locker: BundleAccountLocker, } impl Consumer { @@ -79,12 +85,16 @@ impl Consumer { transaction_recorder: TransactionRecorder, qos_service: QosService, log_messages_bytes_limit: Option, + blacklisted_accounts: HashSet, + bundle_account_locker: BundleAccountLocker, ) -> Self { Self { committer, transaction_recorder, qos_service, log_messages_bytes_limit, + blacklisted_accounts, + bundle_account_locker, } } @@ -114,6 +124,7 @@ impl Consumer { packets_to_process, ) }, + &self.blacklisted_accounts, ); if reached_end_of_slot { @@ -444,20 +455,26 @@ impl Consumer { cost_model_us, ) = measure_us!(self.qos_service.select_and_accumulate_transaction_costs( bank, + &mut bank.write_cost_tracker().unwrap(), txs, pre_results )); // Only 
lock accounts for those transactions are selected for the block; // Once accounts are locked, other threads cannot encode transactions that will modify the - // same account state + // same account state. + // BundleAccountLocker is used to prevent race conditions with bundled transactions from bundle stage + let bundle_account_locks = self.bundle_account_locker.account_locks(); let (batch, lock_us) = measure_us!(bank.prepare_sanitized_batch_with_results( txs, transaction_qos_cost_results.iter().map(|r| match r { Ok(_cost) => Ok(()), Err(err) => Err(err.clone()), - }) + }), + &bundle_account_locks.read_locks(), + &bundle_account_locks.write_locks() )); + drop(bundle_account_locks); // retryable_txs includes AccountInUse, WouldExceedMaxBlockCostLimit // WouldExceedMaxAccountCostLimit, WouldExceedMaxVoteCostLimit @@ -502,8 +519,9 @@ impl Consumer { .iter_mut() .for_each(|x| *x += chunk_offset); - let (cu, us) = - Self::accumulate_execute_units_and_time(&execute_and_commit_timings.execute_timings); + let (cu, us) = execute_and_commit_timings + .execute_timings + .accumulate_execute_units_and_time(); self.qos_service.accumulate_actual_execute_cu(cu); self.qos_service.accumulate_actual_execute_time(us); @@ -540,7 +558,7 @@ impl Consumer { if transaction_status_sender_enabled { pre_balance_info.native = bank.collect_balances(batch); pre_balance_info.token = - collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals) + collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals, None) } }); execute_and_commit_timings.collect_balances_us = collect_balances_us; @@ -594,7 +612,7 @@ impl Consumer { let (record_transactions_summary, record_us) = measure_us!(self .transaction_recorder - .record_transactions(bank.slot(), executed_transactions)); + .record_transactions(bank.slot(), vec![executed_transactions])); execute_and_commit_timings.record_us = record_us; let RecordTransactionsSummary { @@ -678,18 +696,6 @@ impl Consumer { } } - fn accumulate_execute_units_and_time(execute_timings: &ExecuteTimings) -> (u64, u64) { - execute_timings.details.per_program_timings.values().fold( - (0, 0), - |(units, times), program_timings| { - ( - units.saturating_add(program_timings.accumulated_units), - times.saturating_add(program_timings.accumulated_us), - ) - }, - ) - } - /// This function filters pending packets that are still valid /// # Arguments /// * `transactions` - a batch of transactions deserialized from packets @@ -755,7 +761,7 @@ mod tests { }, solana_perf::packet::Packet, solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, - solana_program_runtime::timings::ProgramTiming, + solana_program_runtime::timings::{ExecuteTimings, ProgramTiming}, solana_rpc::transaction_status_service::TransactionStatusService, solana_runtime::prioritization_fee_cache::PrioritizationFeeCache, solana_sdk::{ @@ -818,7 +824,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); let process_transactions_summary = consumer.process_transactions(&bank, &Instant::now(), &transactions); @@ -974,7 +987,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, 
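The hunk above threads the bundle stage's read and write lock sets into prepare_sanitized_batch_with_results so regular transactions cannot race bundled ones. A plausible reading of the conflict rule being enforced, sketched with stand-in types (the real check lives in the bank's account locks, not in this helper):

    use std::collections::HashSet;

    type Pubkey = [u8; 32];

    struct Tx {
        writable: Vec<Pubkey>,
        readonly: Vec<Pubkey>,
    }

    // A transaction conflicts if it writes an account the bundle stage holds
    // (read or write), or reads an account the bundle stage writes.
    fn conflicts(tx: &Tx, bundle_reads: &HashSet<Pubkey>, bundle_writes: &HashSet<Pubkey>) -> bool {
        tx.writable
            .iter()
            .any(|k| bundle_reads.contains(k) || bundle_writes.contains(k))
            || tx.readonly.iter().any(|k| bundle_writes.contains(k))
    }

    fn main() {
        let hot = [1u8; 32];
        let mut bundle_writes = HashSet::new();
        bundle_writes.insert(hot);
        // Even a read of a bundle-write-locked account must wait.
        let tx = Tx { writable: vec![], readonly: vec![hot] };
        assert!(conflicts(&tx, &HashSet::new(), &bundle_writes));
    }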
+ HashSet::default(), + BundleAccountLocker::default(), + ); let process_transactions_batch_output = consumer.process_and_record_transactions(&bank, &transactions, 0); @@ -999,7 +1019,13 @@ mod tests { let mut done = false; // read entries until I find mine, might be ticks... - while let Ok((_bank, (entry, _tick_height))) = entry_receiver.recv() { + while let Ok(WorkingBankEntry { + bank, + entries_ticks, + }) = entry_receiver.recv() + { + assert!(entries_ticks.len() == 1); + let entry = &entries_ticks.get(0).unwrap().0; if !entry.is_tick() { trace!("got entry"); assert_eq!(entry.transactions.len(), transactions.len()); @@ -1101,7 +1127,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); let process_transactions_batch_output = consumer.process_and_record_transactions(&bank, &transactions, 0); @@ -1187,7 +1220,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); let get_block_cost = || bank.read_cost_tracker().unwrap().block_cost(); let get_tx_count = || bank.read_cost_tracker().unwrap().transaction_count(); @@ -1339,7 +1379,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); let process_transactions_batch_output = consumer.process_and_record_transactions(&bank, &transactions, 0); @@ -1536,7 +1583,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder.clone(), QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder.clone(), + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); let process_transactions_summary = consumer.process_transactions(&bank, &Instant::now(), &transactions); @@ -1661,7 +1715,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); let _ = consumer.process_and_record_transactions(&bank, &transactions, 0); @@ -1798,7 +1859,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); let _ = consumer.process_and_record_transactions(&bank, &[sanitized_tx.clone()], 0); @@ -1858,7 +1926,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + 
BundleAccountLocker::default(), + ); // When the working bank in poh_recorder is None, no packets should be processed (consume will not be called) assert!(!poh_recorder.read().unwrap().has_bank()); @@ -1936,7 +2011,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); // When the working bank in poh_recorder is None, no packets should be processed assert!(!poh_recorder.read().unwrap().has_bank()); @@ -1988,7 +2070,14 @@ mod tests { replay_vote_sender, Arc::new(PrioritizationFeeCache::new(0u64)), ); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + let consumer = Consumer::new( + committer, + recorder, + QosService::new(1), + None, + HashSet::default(), + BundleAccountLocker::default(), + ); // When the working bank in poh_recorder is None, no packets should be processed (consume will not be called) assert!(!poh_recorder.read().unwrap().has_bank()); @@ -2076,7 +2165,7 @@ mod tests { expected_units += n * 1000; } - let (units, us) = Consumer::accumulate_execute_units_and_time(&execute_timings); + let (units, us) = execute_timings.accumulate_execute_units_and_time(); assert_eq!(expected_units, units); assert_eq!(expected_us, us); diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 10772b74de..f81a3ca7d8 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -136,7 +136,7 @@ pub(crate) fn weighted_random_order_by_stake<'a>( } #[derive(Default, Debug)] -pub(crate) struct VoteBatchInsertionMetrics { +pub struct VoteBatchInsertionMetrics { pub(crate) num_dropped_gossip: usize, pub(crate) num_dropped_tpu: usize, } diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index abac9c70f8..70e1ca1699 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -5,7 +5,9 @@ use { super::{committer::CommitTransactionDetails, BatchedTransactionDetails}, - solana_cost_model::{cost_model::CostModel, transaction_cost::TransactionCost}, + solana_cost_model::{ + cost_model::CostModel, cost_tracker::CostTracker, transaction_cost::TransactionCost, + }, solana_measure::measure::Measure, solana_runtime::bank::Bank, solana_sdk::{ @@ -40,6 +42,7 @@ impl QosService { pub fn select_and_accumulate_transaction_costs( &self, bank: &Bank, + cost_tracker: &mut CostTracker, // caller should pass in &mut bank.write_cost_tracker().unwrap() transactions: &[SanitizedTransaction], pre_results: impl Iterator>, ) -> (Vec>, usize) { @@ -48,7 +51,8 @@ impl QosService { let (transactions_qos_cost_results, num_included) = self.select_transactions_per_cost( transactions.iter(), transaction_costs.into_iter(), - bank, + bank.slot(), + cost_tracker, ); self.accumulate_estimated_transaction_costs(&Self::accumulate_batched_transaction_costs( transactions_qos_cost_results.iter(), @@ -94,10 +98,10 @@ impl QosService { &self, transactions: impl Iterator, transactions_costs: impl Iterator>, - bank: &Bank, + slot: Slot, + cost_tracker: &mut CostTracker, ) -> (Vec>, usize) { let mut cost_tracking_time = Measure::start("cost_tracking_time"); - let mut cost_tracker = bank.write_cost_tracker().unwrap(); let mut num_included = 0; let select_results = 
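The QosService change that follows has select_transactions_per_cost take an explicit &mut CostTracker instead of locking bank.write_cost_tracker() internally, so callers (including bundle stage) control when and how the tracker is borrowed. A toy version of the try_add selection loop, with a simplified tracker that only enforces a block limit (the real CostTracker in solana_cost_model also tracks account and vote limits):

    struct CostTracker {
        block_cost: u64,
        limit: u64,
    }

    impl CostTracker {
        fn try_add(&mut self, cost: u64) -> Result<u64, &'static str> {
            if self.block_cost + cost > self.limit {
                return Err("WouldExceedMaxBlockCostLimit");
            }
            self.block_cost += cost;
            Ok(self.block_cost)
        }
    }

    // Mirrors the shape of select_transactions_per_cost: accept until full.
    fn select_per_cost(
        costs: &[u64],
        tracker: &mut CostTracker,
    ) -> (Vec<Result<u64, &'static str>>, usize) {
        let mut num_included = 0;
        let results = costs
            .iter()
            .map(|&c| {
                let r = tracker.try_add(c);
                if r.is_ok() {
                    num_included += 1;
                }
                r.map(|_| c)
            })
            .collect();
        (results, num_included)
    }

    fn main() {
        let mut tracker = CostTracker { block_cost: 0, limit: 10 };
        let (results, included) = select_per_cost(&[4, 4, 4], &mut tracker);
        assert_eq!(included, 2); // third transaction would exceed the limit
        assert!(results[2].is_err());
    }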
transactions.zip(transactions_costs) .map(|(tx, cost)| { @@ -105,13 +109,13 @@ impl QosService { Ok(cost) => { match cost_tracker.try_add(&cost) { Ok(current_block_cost) => { - debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}", bank.slot(), tx, cost, current_block_cost); + debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}", slot, tx, cost, current_block_cost); self.metrics.stats.selected_txs_count.fetch_add(1, Ordering::Relaxed); num_included += 1; Ok(cost) }, Err(e) => { - debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", bank.slot(), tx, cost, e); + debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", slot, tx, cost, e); Err(TransactionError::from(e)) } } @@ -683,8 +687,12 @@ mod tests { bank.write_cost_tracker() .unwrap() .set_limits(cost_limit, cost_limit, cost_limit); - let (results, num_selected) = - qos_service.select_transactions_per_cost(txs.iter(), txs_costs.into_iter(), &bank); + let (results, num_selected) = qos_service.select_transactions_per_cost( + txs.iter(), + txs_costs.into_iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); assert_eq!(num_selected, 2); // verify that first transfer tx and first vote are allowed @@ -725,8 +733,12 @@ mod tests { .iter() .map(|cost| cost.as_ref().unwrap().sum()) .sum(); - let (qos_cost_results, _num_included) = - qos_service.select_transactions_per_cost(txs.iter(), txs_costs.into_iter(), &bank); + let (qos_cost_results, _num_included) = qos_service.select_transactions_per_cost( + txs.iter(), + txs_costs.into_iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); assert_eq!( total_txs_cost, bank.read_cost_tracker().unwrap().block_cost() @@ -793,8 +805,12 @@ mod tests { .iter() .map(|cost| cost.as_ref().unwrap().sum()) .sum(); - let (qos_cost_results, _num_included) = - qos_service.select_transactions_per_cost(txs.iter(), txs_costs.into_iter(), &bank); + let (qos_cost_results, _num_included) = qos_service.select_transactions_per_cost( + txs.iter(), + txs_costs.into_iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); assert_eq!( total_txs_cost, bank.read_cost_tracker().unwrap().block_cost() @@ -847,8 +863,12 @@ mod tests { .iter() .map(|cost| cost.as_ref().unwrap().sum()) .sum(); - let (qos_cost_results, _num_included) = - qos_service.select_transactions_per_cost(txs.iter(), txs_costs.into_iter(), &bank); + let (qos_cost_results, _num_included) = qos_service.select_transactions_per_cost( + txs.iter(), + txs_costs.into_iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); assert_eq!( total_txs_cost, bank.read_cost_tracker().unwrap().block_cost() diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 80ce087532..f0d095ddd2 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -14,17 +14,29 @@ use { }, BankingStageStats, FilterForwardingResults, ForwardOption, }, + crate::{ + bundle_stage::bundle_stage_leader_metrics::BundleStageLeaderMetrics, + immutable_deserialized_bundle::ImmutableDeserializedBundle, + }, itertools::Itertools, min_max_heap::MinMaxHeap, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_bundle::BundleExecutionError, solana_measure::measure, solana_runtime::bank::Bank, solana_sdk::{ - 
clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash, - saturating_add_assign, transaction::SanitizedTransaction, + bundle::SanitizedBundle, + clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET}, + feature_set::FeatureSet, + hash::Hash, + pubkey::Pubkey, + saturating_add_assign, + transaction::SanitizedTransaction, }, std::{ - collections::HashMap, + collections::{HashMap, HashSet, VecDeque}, sync::{atomic::Ordering, Arc}, + time::Instant, }, }; @@ -39,6 +51,7 @@ const MAX_NUM_VOTES_RECEIVE: usize = 10_000; pub enum UnprocessedTransactionStorage { VoteStorage(VoteStorage), LocalTransactionStorage(ThreadLocalUnprocessedPackets), + BundleStorage(BundleStorage), } #[derive(Debug)] @@ -57,10 +70,11 @@ pub struct VoteStorage { pub enum ThreadType { Voting(VoteSource), Transactions, + Bundles, } #[derive(Debug)] -pub(crate) enum InsertPacketBatchSummary { +pub enum InsertPacketBatchSummary { VoteBatchInsertionMetrics(VoteBatchInsertionMetrics), PacketBatchInsertionMetrics(PacketBatchInsertionMetrics), } @@ -143,6 +157,7 @@ fn consume_scan_should_process_packet( banking_stage_stats: &BankingStageStats, packet: &ImmutableDeserializedPacket, payload: &mut ConsumeScannerPayload, + blacklisted_accounts: &HashSet, ) -> ProcessingDecision { // If end of the slot, return should process (quick loop after reached end of slot) if payload.reached_end_of_slot { @@ -177,6 +192,10 @@ fn consume_scan_should_process_packet( bank.get_transaction_account_lock_limit(), ) .is_err() + || message + .account_keys() + .iter() + .any(|key| blacklisted_accounts.contains(key)) { payload .message_hash_to_transaction @@ -245,10 +264,24 @@ impl UnprocessedTransactionStorage { }) } + pub fn new_bundle_storage( + unprocessed_bundle_storage: VecDeque, + cost_model_failed_bundles: VecDeque, + ) -> Self { + Self::BundleStorage(BundleStorage { + last_update_slot: Slot::default(), + unprocessed_bundle_storage, + cost_model_buffered_bundle_storage: cost_model_failed_bundles, + }) + } + pub fn is_empty(&self) -> bool { match self { Self::VoteStorage(vote_storage) => vote_storage.is_empty(), Self::LocalTransactionStorage(transaction_storage) => transaction_storage.is_empty(), + UnprocessedTransactionStorage::BundleStorage(bundle_storage) => { + bundle_storage.is_empty() + } } } @@ -256,6 +289,10 @@ impl UnprocessedTransactionStorage { match self { Self::VoteStorage(vote_storage) => vote_storage.len(), Self::LocalTransactionStorage(transaction_storage) => transaction_storage.len(), + UnprocessedTransactionStorage::BundleStorage(bundle_storage) => { + bundle_storage.unprocessed_bundles_len() + + bundle_storage.cost_model_buffered_bundles_len() + } } } @@ -266,6 +303,9 @@ impl UnprocessedTransactionStorage { Self::LocalTransactionStorage(transaction_storage) => { transaction_storage.max_receive_size() } + UnprocessedTransactionStorage::BundleStorage(bundle_storage) => { + bundle_storage.max_receive_size() + } } } @@ -292,6 +332,9 @@ impl UnprocessedTransactionStorage { Self::LocalTransactionStorage(transaction_storage) => { transaction_storage.forward_option() } + UnprocessedTransactionStorage::BundleStorage(bundle_storage) => { + bundle_storage.forward_option() + } } } @@ -299,6 +342,16 @@ impl UnprocessedTransactionStorage { match self { Self::LocalTransactionStorage(transaction_storage) => transaction_storage.clear(), // Since we set everything as forwarded this is the same Self::VoteStorage(vote_storage) => vote_storage.clear_forwarded_packets(), + 
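consume_scan_should_process_packet (below) gains a blacklisted_accounts parameter: any packet whose transaction references a blacklisted account is dropped from the regular banking path (presumably so tip-program-touching transactions flow only through bundle stage; that rationale is an inference, not stated in the diff). The predicate itself is simple, sketched here with stand-in types:

    use std::collections::HashSet;

    type Pubkey = [u8; 32];

    // Drop a packet if its transaction touches any blacklisted account.
    fn touches_blacklist(account_keys: &[Pubkey], blacklist: &HashSet<Pubkey>) -> bool {
        account_keys.iter().any(|key| blacklist.contains(key))
    }

    fn main() {
        let blocked = [9u8; 32];
        let blacklist: HashSet<Pubkey> = [blocked].into_iter().collect();
        assert!(touches_blacklist(&[[0u8; 32], blocked], &blacklist));
        assert!(!touches_blacklist(&[[0u8; 32]], &blacklist));
    }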
UnprocessedTransactionStorage::BundleStorage(bundle_storage) => { + let _ = bundle_storage.reset(); + } + } + } + + pub fn bundle_storage(&mut self) -> Option<&mut BundleStorage> { + match self { + UnprocessedTransactionStorage::BundleStorage(bundle_stoge) => Some(bundle_stoge), + _ => None, } } @@ -313,6 +366,11 @@ impl UnprocessedTransactionStorage { Self::LocalTransactionStorage(transaction_storage) => InsertPacketBatchSummary::from( transaction_storage.insert_batch(deserialized_packets), ), + UnprocessedTransactionStorage::BundleStorage(_) => { + panic!( + "bundles must be inserted using UnprocessedTransactionStorage::insert_bundle" + ) + } } } @@ -332,6 +390,9 @@ impl UnprocessedTransactionStorage { bank, forward_packet_batches_by_accounts, ), + UnprocessedTransactionStorage::BundleStorage(_) => { + panic!("bundles are not forwarded between leaders") + } } } @@ -345,6 +406,7 @@ impl UnprocessedTransactionStorage { banking_stage_stats: &BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, processing_function: F, + blacklisted_accounts: &HashSet, ) -> bool where F: FnMut( @@ -359,15 +421,62 @@ impl UnprocessedTransactionStorage { banking_stage_stats, slot_metrics_tracker, processing_function, + blacklisted_accounts, ), Self::VoteStorage(vote_storage) => vote_storage.process_packets( bank, banking_stage_stats, slot_metrics_tracker, processing_function, + blacklisted_accounts, + ), + UnprocessedTransactionStorage::BundleStorage(_) => panic!( + "UnprocessedTransactionStorage::BundleStorage does not support processing packets" ), } } + + #[must_use] + pub fn process_bundles( + &mut self, + bank: Arc, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + blacklisted_accounts: &HashSet, + processing_function: F, + ) -> bool + where + F: FnMut( + &[(ImmutableDeserializedBundle, SanitizedBundle)], + &mut BundleStageLeaderMetrics, + ) -> Vec>, + { + match self { + UnprocessedTransactionStorage::BundleStorage(bundle_storage) => bundle_storage + .process_bundles( + bank, + bundle_stage_leader_metrics, + blacklisted_accounts, + processing_function, + ), + _ => panic!("class does not support processing bundles"), + } + } + + /// Inserts bundles into storage. 
Only supported for UnprocessedTransactionStorage::BundleStorage + pub(crate) fn insert_bundles( + &mut self, + deserialized_bundles: Vec, + ) -> InsertPacketBundlesSummary { + match self { + UnprocessedTransactionStorage::BundleStorage(bundle_storage) => { + bundle_storage.insert_unprocessed_bundles(deserialized_bundles, true) + } + UnprocessedTransactionStorage::LocalTransactionStorage(_) + | UnprocessedTransactionStorage::VoteStorage(_) => { + panic!("UnprocessedTransactionStorage::insert_bundles only works for type UnprocessedTransactionStorage::BundleStorage"); + } + } + } } impl VoteStorage { @@ -436,6 +545,7 @@ impl VoteStorage { banking_stage_stats: &BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, mut processing_function: F, + blacklisted_accounts: &HashSet, ) -> bool where F: FnMut( @@ -449,7 +559,13 @@ impl VoteStorage { let should_process_packet = |packet: &Arc, payload: &mut ConsumeScannerPayload| { - consume_scan_should_process_packet(&bank, banking_stage_stats, packet, payload) + consume_scan_should_process_packet( + &bank, + banking_stage_stats, + packet, + payload, + blacklisted_accounts, + ) }; // Based on the stake distribution present in the supplied bank, drain the unprocessed votes @@ -524,6 +640,7 @@ impl ThreadLocalUnprocessedPackets { ThreadType::Transactions => ForwardOption::ForwardTransaction, ThreadType::Voting(VoteSource::Tpu) => ForwardOption::ForwardTpuVote, ThreadType::Voting(VoteSource::Gossip) => ForwardOption::NotForward, + ThreadType::Bundles => panic!(), // TODO (LB) } } @@ -848,6 +965,7 @@ impl ThreadLocalUnprocessedPackets { banking_stage_stats: &BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, mut processing_function: F, + blacklisted_accounts: &HashSet, ) -> bool where F: FnMut( @@ -862,7 +980,13 @@ impl ThreadLocalUnprocessedPackets { let should_process_packet = |packet: &Arc, payload: &mut ConsumeScannerPayload| { - consume_scan_should_process_packet(bank, banking_stage_stats, packet, payload) + consume_scan_should_process_packet( + bank, + banking_stage_stats, + packet, + payload, + blacklisted_accounts, + ) }; let mut scanner = create_consume_multi_iterator( &all_packets_to_process, @@ -939,6 +1063,309 @@ impl ThreadLocalUnprocessedPackets { } } +pub struct InsertPacketBundlesSummary { + pub insert_packets_summary: InsertPacketBatchSummary, + pub num_bundles_inserted: usize, + pub num_packets_inserted: usize, + pub num_bundles_dropped: usize, +} + +/// Bundle storage has two deques: one for unprocessed bundles and another for ones that exceeded +/// the cost model and need to get retried next slot. 
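Before the BundleStorage definition below, here is a condensed sketch of its capacity-bounded insertion accounting (mirroring insert_bundles further down): when the deque is full, incoming bundles are counted as dropped rather than evicting buffered ones. The real code bounds by the deque's capacity; an explicit max_len keeps this sketch deterministic, and Bundle is a stand-in type.

    use std::collections::VecDeque;

    struct Bundle;

    // Returns (num_inserted, num_dropped).
    fn insert_bundles(
        deque: &mut VecDeque<Bundle>,
        bundles: Vec<Bundle>,
        max_len: usize,
        push_back: bool,
    ) -> (usize, usize) {
        let (mut inserted, mut dropped) = (0usize, 0usize);
        for bundle in bundles {
            if deque.len() >= max_len {
                dropped += 1; // full: count the drop, keep what's buffered
            } else {
                if push_back {
                    deque.push_back(bundle); // normal arrival order
                } else {
                    deque.push_front(bundle); // retry: jump the queue
                }
                inserted += 1;
            }
        }
        (inserted, dropped)
    }

    fn main() {
        let mut q: VecDeque<Bundle> = VecDeque::new();
        let (ins, dropped) = insert_bundles(&mut q, vec![Bundle, Bundle, Bundle], 2, true);
        assert_eq!((ins, dropped), (2, 1));
    }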
+#[derive(Debug)] +pub struct BundleStorage { + last_update_slot: Slot, + unprocessed_bundle_storage: VecDeque, + // Storage for bundles that exceeded the cost model for the slot they were last attempted + // execution on + cost_model_buffered_bundle_storage: VecDeque, +} + +impl BundleStorage { + fn is_empty(&self) -> bool { + self.unprocessed_bundle_storage.is_empty() + } + + pub fn unprocessed_bundles_len(&self) -> usize { + self.unprocessed_bundle_storage.len() + } + + pub fn unprocessed_packets_len(&self) -> usize { + self.unprocessed_bundle_storage + .iter() + .map(|b| b.len()) + .sum::() + } + + pub(crate) fn cost_model_buffered_bundles_len(&self) -> usize { + self.cost_model_buffered_bundle_storage.len() + } + + pub(crate) fn cost_model_buffered_packets_len(&self) -> usize { + self.cost_model_buffered_bundle_storage + .iter() + .map(|b| b.len()) + .sum() + } + + pub(crate) fn max_receive_size(&self) -> usize { + self.unprocessed_bundle_storage.capacity() - self.unprocessed_bundle_storage.len() + } + + fn forward_option(&self) -> ForwardOption { + ForwardOption::NotForward + } + + /// Returns the number of unprocessed bundles + cost model buffered cleared + pub fn reset(&mut self) -> (usize, usize) { + let num_unprocessed_bundles = self.unprocessed_bundle_storage.len(); + let num_cost_model_buffered_bundles = self.cost_model_buffered_bundle_storage.len(); + self.unprocessed_bundle_storage.clear(); + self.cost_model_buffered_bundle_storage.clear(); + (num_unprocessed_bundles, num_cost_model_buffered_bundles) + } + + fn insert_bundles( + deque: &mut VecDeque, + deserialized_bundles: Vec, + push_back: bool, + ) -> InsertPacketBundlesSummary { + let mut num_bundles_inserted: usize = 0; + let mut num_packets_inserted: usize = 0; + let mut num_bundles_dropped: usize = 0; + let mut num_packets_dropped: usize = 0; + + for bundle in deserialized_bundles { + if deque.capacity() == deque.len() { + saturating_add_assign!(num_bundles_dropped, 1); + saturating_add_assign!(num_packets_dropped, bundle.len()); + } else { + saturating_add_assign!(num_bundles_inserted, 1); + saturating_add_assign!(num_packets_inserted, bundle.len()); + if push_back { + deque.push_back(bundle); + } else { + deque.push_front(bundle) + } + } + } + + InsertPacketBundlesSummary { + insert_packets_summary: PacketBatchInsertionMetrics { + num_dropped_packets: num_packets_dropped, + num_dropped_tracer_packets: 0, + } + .into(), + num_bundles_inserted, + num_packets_inserted, + num_bundles_dropped, + } + } + + fn push_front_unprocessed_bundles( + &mut self, + deserialized_bundles: Vec, + ) -> InsertPacketBundlesSummary { + Self::insert_bundles( + &mut self.unprocessed_bundle_storage, + deserialized_bundles, + false, + ) + } + + fn push_back_cost_model_buffered_bundles( + &mut self, + deserialized_bundles: Vec, + ) -> InsertPacketBundlesSummary { + Self::insert_bundles( + &mut self.cost_model_buffered_bundle_storage, + deserialized_bundles, + true, + ) + } + + fn insert_unprocessed_bundles( + &mut self, + deserialized_bundles: Vec, + push_back: bool, + ) -> InsertPacketBundlesSummary { + Self::insert_bundles( + &mut self.unprocessed_bundle_storage, + deserialized_bundles, + push_back, + ) + } + + /// Drains bundles from the queue, sanitizes them to prepare for execution, executes them by + /// calling `processing_function`, then potentially rebuffer them. 
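process_bundles (defined next) classifies each bundle's execution result and either drops it, sends it to the cost-model side buffer, or rebuffers it at the front of the queue for the next slot. A condensed sketch of that dispatch with a toy error enum and bundle ids standing in for the real types:

    use std::collections::VecDeque;

    enum ExecError {
        PohRecordError,
        BankProcessingTimeLimitReached,
        ExceedsCostModel,
        TransactionFailure,
    }

    // Returns true when the slot is over. Slot-ending errors rebuffer the
    // bundle at the front (in original order); cost-model failures go to a
    // side buffer; execution failures are dropped.
    fn handle_results(
        executed: Vec<(u64, Result<(), ExecError>)>,
        unprocessed: &mut VecDeque<u64>,
        cost_model_buffer: &mut VecDeque<u64>,
    ) -> bool {
        let mut slot_over = false;
        let mut rebuffer = Vec::new();
        for (bundle, result) in executed {
            match result {
                Ok(()) => {}
                Err(ExecError::PohRecordError) | Err(ExecError::BankProcessingTimeLimitReached) => {
                    rebuffer.push(bundle);
                    slot_over = true;
                }
                Err(ExecError::ExceedsCostModel) => cost_model_buffer.push_back(bundle),
                Err(ExecError::TransactionFailure) => {} // dropped
            }
        }
        // Reverse so the first rebuffered bundle ends up at the front.
        for bundle in rebuffer.into_iter().rev() {
            unprocessed.push_front(bundle);
        }
        slot_over
    }

    fn main() {
        let mut unprocessed = VecDeque::new();
        let mut cost_buf = VecDeque::new();
        let over = handle_results(
            vec![(1, Err(ExecError::PohRecordError)), (2, Err(ExecError::ExceedsCostModel))],
            &mut unprocessed,
            &mut cost_buf,
        );
        assert!(over);
        assert_eq!(unprocessed.front(), Some(&1));
        assert_eq!(cost_buf.front(), Some(&2));
    }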
+ pub fn process_bundles( + &mut self, + bank: Arc, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + blacklisted_accounts: &HashSet, + mut processing_function: F, + ) -> bool + where + F: FnMut( + &[(ImmutableDeserializedBundle, SanitizedBundle)], + &mut BundleStageLeaderMetrics, + ) -> Vec>, + { + let sanitized_bundles = self.drain_and_sanitize_bundles( + bank, + bundle_stage_leader_metrics, + blacklisted_accounts, + ); + + debug!("processing {} bundles", sanitized_bundles.len()); + let bundle_execution_results = + processing_function(&sanitized_bundles, bundle_stage_leader_metrics); + + let mut is_slot_over = false; + + let mut rebuffered_bundles = Vec::new(); + + sanitized_bundles + .into_iter() + .zip(bundle_execution_results) + .for_each( + |((deserialized_bundle, sanitized_bundle), result)| match result { + Ok(_) => { + debug!("bundle={} executed ok", sanitized_bundle.bundle_id); + // yippee + } + Err(BundleExecutionError::PohRecordError(e)) => { + // buffer the bundle to the front of the queue to be attempted next slot + debug!( + "bundle={} poh record error: {e:?}", + sanitized_bundle.bundle_id + ); + rebuffered_bundles.push(deserialized_bundle); + is_slot_over = true; + } + Err(BundleExecutionError::BankProcessingTimeLimitReached) => { + // buffer the bundle to the front of the queue to be attempted next slot + debug!("bundle={} bank processing done", sanitized_bundle.bundle_id); + rebuffered_bundles.push(deserialized_bundle); + is_slot_over = true; + } + Err(BundleExecutionError::TransactionFailure(e)) => { + debug!( + "bundle={} execution error: {:?}", + sanitized_bundle.bundle_id, e + ); + // do nothing + } + Err(BundleExecutionError::ExceedsCostModel) => { + // cost model buffered bundles contain most recent bundles at the front of the queue + debug!("bundle={} exceeds cost model", sanitized_bundle.bundle_id); + self.push_back_cost_model_buffered_bundles(vec![deserialized_bundle]); + } + Err(BundleExecutionError::TipError(e)) => { + debug!("bundle={} tip error: {}", sanitized_bundle.bundle_id, e); + // Tip errors are _typically_ due to misconfiguration (except for poh record error, bank processing error, exceeds cost model) + // in order to prevent buffering too many bundles, we'll just drop the bundle + } + Err(BundleExecutionError::LockError) => { + // lock errors are irrecoverable due to malformed transactions + debug!("bundle={} lock error", sanitized_bundle.bundle_id); + } + }, + ); + + // rebuffered bundles are pushed onto deque in reverse order so the first bundle is at the front + for bundle in rebuffered_bundles.into_iter().rev() { + self.push_front_unprocessed_bundles(vec![bundle]); + } + + is_slot_over + } + + /// Drains the unprocessed_bundle_storage, converting bundle packets into SanitizedBundles + fn drain_and_sanitize_bundles( + &mut self, + bank: Arc, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + blacklisted_accounts: &HashSet, + ) -> Vec<(ImmutableDeserializedBundle, SanitizedBundle)> { + let mut error_metrics = TransactionErrorMetrics::default(); + + let start = Instant::now(); + + let mut sanitized_bundles = Vec::new(); + + // on new slot, drain anything that was buffered from last slot + if bank.slot() != self.last_update_slot { + sanitized_bundles.extend( + self.cost_model_buffered_bundle_storage + .drain(..) 
+ .filter_map(|packet_bundle| { + let r = packet_bundle.build_sanitized_bundle( + &bank, + blacklisted_accounts, + &mut error_metrics, + ); + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_sanitize_transaction_result(&r); + + match r { + Ok(sanitized_bundle) => Some((packet_bundle, sanitized_bundle)), + Err(e) => { + debug!( + "bundle id: {} error sanitizing: {}", + packet_bundle.bundle_id(), + e + ); + None + } + } + }), + ); + + self.last_update_slot = bank.slot(); + } + + sanitized_bundles.extend(self.unprocessed_bundle_storage.drain(..).filter_map( + |packet_bundle| { + let r = packet_bundle.build_sanitized_bundle( + &bank, + blacklisted_accounts, + &mut error_metrics, + ); + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_sanitize_transaction_result(&r); + match r { + Ok(sanitized_bundle) => Some((packet_bundle, sanitized_bundle)), + Err(e) => { + debug!( + "bundle id: {} error sanitizing: {}", + packet_bundle.bundle_id(), + e + ); + None + } + } + }, + )); + + let elapsed = start.elapsed().as_micros(); + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_sanitize_bundle_elapsed_us(elapsed as u64); + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .increment_transactions_from_packets_us(elapsed as u64); + + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .accumulate_transaction_errors(&error_metrics); + + sanitized_bundles + } +} + #[cfg(test)] mod tests { use { diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs index ba76b794ba..bedfe117dc 100644 --- a/core/src/banking_trace.rs +++ b/core/src/banking_trace.rs @@ -315,6 +315,7 @@ impl BankingTracer { } } +#[derive(Clone)] pub struct TracedSender { label: ChannelLabel, sender: Sender, diff --git a/core/src/bundle_stage.rs b/core/src/bundle_stage.rs new file mode 100644 index 0000000000..3a4103831b --- /dev/null +++ b/core/src/bundle_stage.rs @@ -0,0 +1,436 @@ +//! The `bundle_stage` processes bundles, which are list of transactions to be executed +//! sequentially and atomically. 
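The module doc above describes the core property of a bundle: its transactions execute sequentially and commit atomically (all or nothing). A toy illustration of that all-or-nothing semantics over a plain balance map; no real bank is involved, and the function name is illustrative:

    use std::collections::HashMap;

    // Apply transfers to a copy of the state; commit only if every step
    // succeeds, mirroring the atomicity of a bundle.
    fn execute_bundle(
        state: &mut HashMap<&'static str, u64>,
        transfers: &[(&'static str, &'static str, u64)],
    ) -> Result<(), String> {
        let mut scratch = state.clone();
        for (from, to, lamports) in transfers {
            let balance = scratch
                .get_mut(from)
                .ok_or_else(|| format!("missing account {from}"))?;
            *balance = balance
                .checked_sub(*lamports)
                .ok_or_else(|| format!("{from} underfunded"))?;
            *scratch.entry(*to).or_insert(0) += lamports;
        }
        *state = scratch; // commit only on full success
        Ok(())
    }

    fn main() {
        let mut state = HashMap::from([("alice", 5u64), ("bob", 0)]);
        // The second transfer fails, so the first must not be visible either.
        assert!(execute_bundle(&mut state, &[("alice", "bob", 3), ("alice", "bob", 9)]).is_err());
        assert_eq!(state["alice"], 5);
    }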
+use { + crate::{ + banking_stage::{ + decision_maker::{BufferedPacketsDecision, DecisionMaker}, + qos_service::QosService, + unprocessed_transaction_storage::UnprocessedTransactionStorage, + }, + bundle_stage::{ + bundle_account_locker::BundleAccountLocker, bundle_consumer::BundleConsumer, + bundle_packet_receiver::BundleReceiver, + bundle_reserved_space_manager::BundleReservedSpaceManager, + bundle_stage_leader_metrics::BundleStageLeaderMetrics, committer::Committer, + }, + packet_bundle::PacketBundle, + proxy::block_engine_stage::BlockBuilderFeeInfo, + tip_manager::TipManager, + }, + crossbeam_channel::{Receiver, RecvTimeoutError}, + solana_cost_model::block_cost_limits::MAX_BLOCK_UNITS, + solana_gossip::cluster_info::ClusterInfo, + solana_ledger::blockstore_processor::TransactionStatusSender, + solana_measure::measure, + solana_poh::poh_recorder::PohRecorder, + solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, + solana_sdk::timing::AtomicInterval, + solana_vote::vote_sender_types::ReplayVoteSender, + std::{ + collections::VecDeque, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, Mutex, RwLock, + }, + thread::{self, Builder, JoinHandle}, + time::{Duration, Instant}, + }, +}; + +pub mod bundle_account_locker; +mod bundle_consumer; +mod bundle_packet_deserializer; +mod bundle_packet_receiver; +mod bundle_reserved_space_manager; +pub(crate) mod bundle_stage_leader_metrics; +mod committer; + +const MAX_BUNDLE_RETRY_DURATION: Duration = Duration::from_millis(10); +const SLOT_BOUNDARY_CHECK_PERIOD: Duration = Duration::from_millis(10); + +// Stats emitted periodically +#[derive(Default)] +pub struct BundleStageLoopMetrics { + last_report: AtomicInterval, + id: u32, + + // total received + num_bundles_received: AtomicU64, + num_packets_received: AtomicU64, + + // newly buffered + newly_buffered_bundles_count: AtomicU64, + + // currently buffered + current_buffered_bundles_count: AtomicU64, + current_buffered_packets_count: AtomicU64, + + // buffered due to cost model + cost_model_buffered_bundles_count: AtomicU64, + cost_model_buffered_packets_count: AtomicU64, + + // number of bundles dropped during insertion + num_bundles_dropped: AtomicU64, + + // timings + receive_and_buffer_bundles_elapsed_us: AtomicU64, + process_buffered_bundles_elapsed_us: AtomicU64, +} + +impl BundleStageLoopMetrics { + fn new(id: u32) -> Self { + BundleStageLoopMetrics { + id, + ..BundleStageLoopMetrics::default() + } + } + + pub fn increment_num_bundles_received(&mut self, count: u64) { + self.num_bundles_received + .fetch_add(count, Ordering::Relaxed); + } + + pub fn increment_num_packets_received(&mut self, count: u64) { + self.num_packets_received + .fetch_add(count, Ordering::Relaxed); + } + + pub fn increment_newly_buffered_bundles_count(&mut self, count: u64) { + self.newly_buffered_bundles_count + .fetch_add(count, Ordering::Relaxed); + } + + pub fn increment_current_buffered_bundles_count(&mut self, count: u64) { + self.current_buffered_bundles_count + .fetch_add(count, Ordering::Relaxed); + } + + pub fn increment_current_buffered_packets_count(&mut self, count: u64) { + self.current_buffered_packets_count + .fetch_add(count, Ordering::Relaxed); + } + + pub fn increment_cost_model_buffered_bundles_count(&mut self, count: u64) { + self.cost_model_buffered_bundles_count + .fetch_add(count, Ordering::Relaxed); + } + + pub fn increment_cost_model_buffered_packets_count(&mut self, count: u64) { + self.cost_model_buffered_packets_count + .fetch_add(count, 
Ordering::Relaxed); + } + + pub fn increment_num_bundles_dropped(&mut self, count: u64) { + self.num_bundles_dropped.fetch_add(count, Ordering::Relaxed); + } + + pub fn increment_receive_and_buffer_bundles_elapsed_us(&mut self, count: u64) { + self.receive_and_buffer_bundles_elapsed_us + .fetch_add(count, Ordering::Relaxed); + } + + pub fn increment_process_buffered_bundles_elapsed_us(&mut self, count: u64) { + self.process_buffered_bundles_elapsed_us + .fetch_add(count, Ordering::Relaxed); + } +} + +impl BundleStageLoopMetrics { + fn maybe_report(&mut self, report_interval_ms: u64) { + if self.last_report.should_update(report_interval_ms) { + datapoint_info!( + "bundle_stage-loop_stats", + ("id", self.id, i64), + ( + "num_bundles_received", + self.num_bundles_received.swap(0, Ordering::Acquire) as i64, + i64 + ), + ( + "num_packets_received", + self.num_packets_received.swap(0, Ordering::Acquire) as i64, + i64 + ), + ( + "newly_buffered_bundles_count", + self.newly_buffered_bundles_count.swap(0, Ordering::Acquire) as i64, + i64 + ), + ( + "current_buffered_bundles_count", + self.current_buffered_bundles_count + .swap(0, Ordering::Acquire) as i64, + i64 + ), + ( + "current_buffered_packets_count", + self.current_buffered_packets_count + .swap(0, Ordering::Acquire) as i64, + i64 + ), + ( + "num_bundles_dropped", + self.num_bundles_dropped.swap(0, Ordering::Acquire) as i64, + i64 + ), + ( + "receive_and_buffer_bundles_elapsed_us", + self.receive_and_buffer_bundles_elapsed_us + .swap(0, Ordering::Acquire) as i64, + i64 + ), + ( + "process_buffered_bundles_elapsed_us", + self.process_buffered_bundles_elapsed_us + .swap(0, Ordering::Acquire) as i64, + i64 + ), + ); + } + } +} + +pub struct BundleStage { + bundle_thread: JoinHandle<()>, +} + +impl BundleStage { + #[allow(clippy::new_ret_no_self)] + #[allow(clippy::too_many_arguments)] + pub fn new( + cluster_info: &Arc, + poh_recorder: &Arc>, + bundle_receiver: Receiver>, + transaction_status_sender: Option, + replay_vote_sender: ReplayVoteSender, + log_messages_bytes_limit: Option, + exit: Arc, + tip_manager: TipManager, + bundle_account_locker: BundleAccountLocker, + block_builder_fee_info: &Arc>, + preallocated_bundle_cost: u64, + bank_forks: Arc>, + prioritization_fee_cache: &Arc, + ) -> Self { + Self::start_bundle_thread( + cluster_info, + poh_recorder, + bundle_receiver, + transaction_status_sender, + replay_vote_sender, + log_messages_bytes_limit, + exit, + tip_manager, + bundle_account_locker, + MAX_BUNDLE_RETRY_DURATION, + block_builder_fee_info, + preallocated_bundle_cost, + bank_forks, + prioritization_fee_cache, + ) + } + + pub fn join(self) -> thread::Result<()> { + self.bundle_thread.join() + } + + #[allow(clippy::too_many_arguments)] + fn start_bundle_thread( + cluster_info: &Arc, + poh_recorder: &Arc>, + bundle_receiver: Receiver>, + transaction_status_sender: Option, + replay_vote_sender: ReplayVoteSender, + log_message_bytes_limit: Option, + exit: Arc, + tip_manager: TipManager, + bundle_account_locker: BundleAccountLocker, + max_bundle_retry_duration: Duration, + block_builder_fee_info: &Arc>, + preallocated_bundle_cost: u64, + bank_forks: Arc>, + prioritization_fee_cache: &Arc, + ) -> Self { + const BUNDLE_STAGE_ID: u32 = 10_000; + let poh_recorder = poh_recorder.clone(); + let cluster_info = cluster_info.clone(); + + let mut bundle_receiver = + BundleReceiver::new(BUNDLE_STAGE_ID, bundle_receiver, bank_forks, Some(5)); + + let committer = Committer::new( + transaction_status_sender, + replay_vote_sender, + 
prioritization_fee_cache.clone(), + ); + let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone()); + + let unprocessed_bundle_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(1_000), + VecDeque::with_capacity(1_000), + ); + + let reserved_ticks = poh_recorder + .read() + .unwrap() + .ticks_per_slot() + .saturating_mul(8) + .saturating_div(10); + + // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units. + // The last 20% has has full compute so blockspace is maximized if BundleStage is idle. + let reserved_space = BundleReservedSpaceManager::new( + MAX_BLOCK_UNITS, + preallocated_bundle_cost, + reserved_ticks, + ); + + let consumer = BundleConsumer::new( + committer, + poh_recorder.read().unwrap().new_recorder(), + QosService::new(BUNDLE_STAGE_ID), + log_message_bytes_limit, + tip_manager, + bundle_account_locker, + block_builder_fee_info.clone(), + max_bundle_retry_duration, + cluster_info, + reserved_space, + ); + + let bundle_thread = Builder::new() + .name("solBundleStgTx".to_string()) + .spawn(move || { + Self::process_loop( + &mut bundle_receiver, + decision_maker, + consumer, + BUNDLE_STAGE_ID, + unprocessed_bundle_storage, + exit, + ); + }) + .unwrap(); + + Self { bundle_thread } + } + + #[allow(clippy::too_many_arguments)] + fn process_loop( + bundle_receiver: &mut BundleReceiver, + decision_maker: DecisionMaker, + mut consumer: BundleConsumer, + id: u32, + mut unprocessed_bundle_storage: UnprocessedTransactionStorage, + exit: Arc, + ) { + let mut last_metrics_update = Instant::now(); + + let mut bundle_stage_metrics = BundleStageLoopMetrics::new(id); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(id); + + while !exit.load(Ordering::Relaxed) { + if !unprocessed_bundle_storage.is_empty() + || last_metrics_update.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD + { + let (_, process_buffered_packets_time) = measure!( + Self::process_buffered_bundles( + &decision_maker, + &mut consumer, + &mut unprocessed_bundle_storage, + &mut bundle_stage_leader_metrics, + ), + "process_buffered_packets", + ); + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .increment_process_buffered_packets_us(process_buffered_packets_time.as_us()); + last_metrics_update = Instant::now(); + } + + match bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_bundle_storage, + &mut bundle_stage_metrics, + &mut bundle_stage_leader_metrics, + ) { + Ok(_) | Err(RecvTimeoutError::Timeout) => (), + Err(RecvTimeoutError::Disconnected) => break, + } + + let bundle_storage = unprocessed_bundle_storage.bundle_storage().unwrap(); + bundle_stage_metrics.increment_current_buffered_bundles_count( + bundle_storage.unprocessed_bundles_len() as u64, + ); + bundle_stage_metrics.increment_current_buffered_packets_count( + bundle_storage.unprocessed_packets_len() as u64, + ); + bundle_stage_metrics.increment_cost_model_buffered_bundles_count( + bundle_storage.cost_model_buffered_bundles_len() as u64, + ); + bundle_stage_metrics.increment_cost_model_buffered_packets_count( + bundle_storage.cost_model_buffered_packets_len() as u64, + ); + bundle_stage_metrics.maybe_report(1_000); + } + } + + #[allow(clippy::too_many_arguments)] + fn process_buffered_bundles( + decision_maker: &DecisionMaker, + consumer: &mut BundleConsumer, + unprocessed_bundle_storage: &mut UnprocessedTransactionStorage, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + ) { + let (decision, make_decision_time) = 
+            measure!(decision_maker.make_consume_or_forward_decision());
+
+        let (metrics_action, banking_stage_metrics_action) =
+            bundle_stage_leader_metrics.check_leader_slot_boundary(decision.bank_start());
+        bundle_stage_leader_metrics
+            .leader_slot_metrics_tracker()
+            .increment_make_decision_us(make_decision_time.as_us());
+
+        match decision {
+            // BufferedPacketsDecision::Consume means this leader is scheduled to be running at the moment.
+            // Execute, record, and commit as many bundles as possible given time, compute, and other constraints.
+            BufferedPacketsDecision::Consume(bank_start) => {
+                // Take metrics action before consuming packets (potentially resetting the
+                // slot metrics tracker to the next slot) so that we don't count the
+                // packet processing metrics from the next slot towards the metrics
+                // of the previous slot
+                bundle_stage_leader_metrics
+                    .apply_action(metrics_action, banking_stage_metrics_action);
+
+                let (_, consume_buffered_packets_time) = measure!(
+                    consumer.consume_buffered_bundles(
+                        &bank_start,
+                        unprocessed_bundle_storage,
+                        bundle_stage_leader_metrics,
+                    ),
+                    "consume_buffered_bundles",
+                );
+                bundle_stage_leader_metrics
+                    .leader_slot_metrics_tracker()
+                    .increment_consume_buffered_packets_us(consume_buffered_packets_time.as_us());
+            }
+            // BufferedPacketsDecision::Forward means this validator's leader slot is far away.
+            // Bundles aren't forwarded because that would break atomicity guarantees, so just drop them.
+            BufferedPacketsDecision::Forward => {
+                let (_num_bundles_cleared, _num_cost_model_buffered_bundles) =
+                    unprocessed_bundle_storage.bundle_storage().unwrap().reset();
+
+                // TODO (LB): add metrics here for how many bundles were cleared
+
+                bundle_stage_leader_metrics
+                    .apply_action(metrics_action, banking_stage_metrics_action);
+            }
+            // BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold means the validator
+            // is approaching its leader slot, so hold the bundles. Bundles aren't forwarded here either,
+            // because forwarding breaks atomicity guarantees.
+            BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => {
+                bundle_stage_leader_metrics
+                    .apply_action(metrics_action, banking_stage_metrics_action);
+            }
+        }
+    }
+}
diff --git a/core/src/bundle_stage/bundle_account_locker.rs b/core/src/bundle_stage/bundle_account_locker.rs
new file mode 100644
index 0000000000..5ea8b5396d
--- /dev/null
+++ b/core/src/bundle_stage/bundle_account_locker.rs
@@ -0,0 +1,326 @@
+//! Handles pre-locking bundle accounts so that the accounts a bundle touches can be reserved
+//! ahead of time for execution. Also ensures that ALL accounts mentioned across a bundle are
+//! locked to avoid race conditions between BundleStage and BankingStage.
+//!
+//! For instance, imagine a bundle with three transactions whose account sets are
+//! {{A, B}, {B, C}, {C, D}}. We need to lock A, B, C, and D even though only one transaction
+//! is executed at a time. Imagine BundleStage is in the middle of processing {C, D} and we
+//! didn't have a lock on accounts {A, B, C}. In this situation, there's a chance that
+//! BankingStage could process a transaction containing A or B and commit the results before
+//! the bundle completes. By the time the bundle commits the new account state for {A, B, C},
+//! A and B would be incorrect, the entries containing the bundle would be replayed improperly,
+//! and that leader would have produced an invalid block.
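The counting scheme behind this can be sketched independently of the validator types: fold every account mentioned anywhere in the bundle into a map of lock counts, mirroring get_read_write_locks below. Toy string accounts stand in for real Pubkeys:

use std::collections::HashMap;

/// Counts how many times each account appears across an entire bundle.
/// Holding locks keyed on these counts for the bundle's whole lifetime is
/// what keeps a concurrent stage away from the accounts until every
/// transaction in the bundle has committed.
fn bundle_lock_counts<'a>(bundle: &[&[&'a str]]) -> HashMap<&'a str, u64> {
    bundle
        .iter()
        .flat_map(|tx_accounts| tx_accounts.iter().copied())
        .fold(HashMap::new(), |mut counts, account| {
            *counts.entry(account).or_insert(0) += 1;
            counts
        })
}

fn main() {
    // The {{A, B}, {B, C}, {C, D}} example from the comment above.
    let bundle: Vec<&[&str]> = vec![&["A", "B"], &["B", "C"], &["C", "D"]];
    let counts = bundle_lock_counts(&bundle);
    // All four accounts are locked up front, even though each transaction
    // only touches two of them.
    assert_eq!(counts["A"], 1);
    assert_eq!(counts["B"], 2); // B appears in two transactions
    assert_eq!(counts["C"], 2);
    assert_eq!(counts["D"], 1);
}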
+use { + solana_runtime::bank::Bank, + solana_sdk::{bundle::SanitizedBundle, pubkey::Pubkey, transaction::TransactionAccountLocks}, + std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + sync::{Arc, Mutex, MutexGuard}, + }, + thiserror::Error, +}; + +#[derive(Clone, Error, Debug)] +pub enum BundleAccountLockerError { + #[error("locking error")] + LockingError, +} + +pub type BundleAccountLockerResult = Result; + +pub struct LockedBundle<'a, 'b> { + bundle_account_locker: &'a BundleAccountLocker, + sanitized_bundle: &'b SanitizedBundle, + bank: Arc, +} + +impl<'a, 'b> LockedBundle<'a, 'b> { + pub fn new( + bundle_account_locker: &'a BundleAccountLocker, + sanitized_bundle: &'b SanitizedBundle, + bank: &Arc, + ) -> Self { + Self { + bundle_account_locker, + sanitized_bundle, + bank: bank.clone(), + } + } + + pub fn sanitized_bundle(&self) -> &SanitizedBundle { + self.sanitized_bundle + } +} + +// Automatically unlock bundle accounts when destructed +impl<'a, 'b> Drop for LockedBundle<'a, 'b> { + fn drop(&mut self) { + let _ = self + .bundle_account_locker + .unlock_bundle_accounts(self.sanitized_bundle, &self.bank); + } +} + +#[derive(Default, Clone)] +pub struct BundleAccountLocks { + read_locks: HashMap, + write_locks: HashMap, +} + +impl BundleAccountLocks { + pub fn read_locks(&self) -> HashSet { + self.read_locks.keys().cloned().collect() + } + + pub fn write_locks(&self) -> HashSet { + self.write_locks.keys().cloned().collect() + } + + pub fn lock_accounts( + &mut self, + read_locks: HashMap, + write_locks: HashMap, + ) { + for (acc, count) in read_locks { + *self.read_locks.entry(acc).or_insert(0) += count; + } + for (acc, count) in write_locks { + *self.write_locks.entry(acc).or_insert(0) += count; + } + } + + pub fn unlock_accounts( + &mut self, + read_locks: HashMap, + write_locks: HashMap, + ) { + for (acc, count) in read_locks { + if let Entry::Occupied(mut entry) = self.read_locks.entry(acc) { + let val = entry.get_mut(); + *val = val.saturating_sub(count); + if entry.get() == &0 { + let _ = entry.remove(); + } + } else { + warn!("error unlocking read-locked account, account: {:?}", acc); + } + } + for (acc, count) in write_locks { + if let Entry::Occupied(mut entry) = self.write_locks.entry(acc) { + let val = entry.get_mut(); + *val = val.saturating_sub(count); + if entry.get() == &0 { + let _ = entry.remove(); + } + } else { + warn!("error unlocking write-locked account, account: {:?}", acc); + } + } + } +} + +#[derive(Clone, Default)] +pub struct BundleAccountLocker { + account_locks: Arc>, +} + +impl BundleAccountLocker { + /// used in BankingStage during TransactionBatch construction to ensure that BankingStage + /// doesn't lock anything currently locked in the BundleAccountLocker + pub fn read_locks(&self) -> HashSet { + self.account_locks.lock().unwrap().read_locks() + } + + /// used in BankingStage during TransactionBatch construction to ensure that BankingStage + /// doesn't lock anything currently locked in the BundleAccountLocker + pub fn write_locks(&self) -> HashSet { + self.account_locks.lock().unwrap().write_locks() + } + + /// used in BankingStage during TransactionBatch construction to ensure that BankingStage + /// doesn't lock anything currently locked in the BundleAccountLocker + pub fn account_locks(&self) -> MutexGuard { + self.account_locks.lock().unwrap() + } + + /// Prepares a locked bundle and returns a LockedBundle containing locked accounts. 
+ /// When a LockedBundle is dropped, the accounts are automatically unlocked + pub fn prepare_locked_bundle<'a, 'b>( + &'a self, + sanitized_bundle: &'b SanitizedBundle, + bank: &Arc, + ) -> BundleAccountLockerResult> { + let (read_locks, write_locks) = Self::get_read_write_locks(sanitized_bundle, bank)?; + + self.account_locks + .lock() + .unwrap() + .lock_accounts(read_locks, write_locks); + Ok(LockedBundle::new(self, sanitized_bundle, bank)) + } + + /// Unlocks bundle accounts. Note that LockedBundle::drop will auto-drop the bundle account locks + fn unlock_bundle_accounts( + &self, + sanitized_bundle: &SanitizedBundle, + bank: &Bank, + ) -> BundleAccountLockerResult<()> { + let (read_locks, write_locks) = Self::get_read_write_locks(sanitized_bundle, bank)?; + + self.account_locks + .lock() + .unwrap() + .unlock_accounts(read_locks, write_locks); + Ok(()) + } + + /// Returns the read and write locks for this bundle + /// Each lock type contains a HashMap which maps Pubkey to number of locks held + fn get_read_write_locks( + bundle: &SanitizedBundle, + bank: &Bank, + ) -> BundleAccountLockerResult<(HashMap, HashMap)> { + let transaction_locks: Vec = bundle + .transactions + .iter() + .filter_map(|tx| { + tx.get_account_locks(bank.get_transaction_account_lock_limit()) + .ok() + }) + .collect(); + + if transaction_locks.len() != bundle.transactions.len() { + return Err(BundleAccountLockerError::LockingError); + } + + let bundle_read_locks = transaction_locks + .iter() + .flat_map(|tx| tx.readonly.iter().map(|a| **a)); + let bundle_read_locks = + bundle_read_locks + .into_iter() + .fold(HashMap::new(), |mut map, acc| { + *map.entry(acc).or_insert(0) += 1; + map + }); + + let bundle_write_locks = transaction_locks + .iter() + .flat_map(|tx| tx.writable.iter().map(|a| **a)); + let bundle_write_locks = + bundle_write_locks + .into_iter() + .fold(HashMap::new(), |mut map, acc| { + *map.entry(acc).or_insert(0) += 1; + map + }); + + Ok((bundle_read_locks, bundle_write_locks)) + } +} + +#[cfg(test)] +mod tests { + use { + crate::{ + bundle_stage::bundle_account_locker::BundleAccountLocker, + immutable_deserialized_bundle::ImmutableDeserializedBundle, + packet_bundle::PacketBundle, + }, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_ledger::genesis_utils::create_genesis_config, + solana_perf::packet::PacketBatch, + solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo}, + solana_sdk::{ + packet::Packet, signature::Signer, signer::keypair::Keypair, system_program, + system_transaction::transfer, transaction::VersionedTransaction, + }, + std::{collections::HashSet, sync::Arc}, + }; + + #[test] + fn test_simple_lock_bundles() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let bundle_account_locker = BundleAccountLocker::default(); + + let kp0 = Keypair::new(); + let kp1 = Keypair::new(); + + let tx0 = VersionedTransaction::from(transfer( + &mint_keypair, + &kp0.pubkey(), + 1, + genesis_config.hash(), + )); + let tx1 = VersionedTransaction::from(transfer( + &mint_keypair, + &kp1.pubkey(), + 1, + genesis_config.hash(), + )); + + let mut packet_bundle0 = PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, &tx0).unwrap()]), + bundle_id: tx0.signatures[0].to_string(), + }; + let mut packet_bundle1 = PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, &tx1).unwrap()]), + bundle_id: tx1.signatures[0].to_string(), + }; + + let mut transaction_errors = TransactionErrorMetrics::default(); + + let sanitized_bundle0 = ImmutableDeserializedBundle::new(&mut packet_bundle0, None) + .unwrap() + .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors) + .expect("sanitize bundle 0"); + let sanitized_bundle1 = ImmutableDeserializedBundle::new(&mut packet_bundle1, None) + .unwrap() + .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors) + .expect("sanitize bundle 1"); + + let locked_bundle0 = bundle_account_locker + .prepare_locked_bundle(&sanitized_bundle0, &bank) + .unwrap(); + + assert_eq!( + bundle_account_locker.write_locks(), + HashSet::from_iter([mint_keypair.pubkey(), kp0.pubkey()]) + ); + assert_eq!( + bundle_account_locker.read_locks(), + HashSet::from_iter([system_program::id()]) + ); + + let locked_bundle1 = bundle_account_locker + .prepare_locked_bundle(&sanitized_bundle1, &bank) + .unwrap(); + assert_eq!( + bundle_account_locker.write_locks(), + HashSet::from_iter([mint_keypair.pubkey(), kp0.pubkey(), kp1.pubkey()]) + ); + assert_eq!( + bundle_account_locker.read_locks(), + HashSet::from_iter([system_program::id()]) + ); + + drop(locked_bundle0); + assert_eq!( + bundle_account_locker.write_locks(), + HashSet::from_iter([mint_keypair.pubkey(), kp1.pubkey()]) + ); + assert_eq!( + bundle_account_locker.read_locks(), + HashSet::from_iter([system_program::id()]) + ); + + drop(locked_bundle1); + assert!(bundle_account_locker.write_locks().is_empty()); + assert!(bundle_account_locker.read_locks().is_empty()); + } +} diff --git a/core/src/bundle_stage/bundle_consumer.rs b/core/src/bundle_stage/bundle_consumer.rs new file mode 100644 index 0000000000..beaf98bd55 --- /dev/null +++ b/core/src/bundle_stage/bundle_consumer.rs @@ -0,0 +1,1593 @@ +use { + crate::{ + banking_stage::{ + committer::CommitTransactionDetails, leader_slot_metrics::ProcessTransactionsSummary, + leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, qos_service::QosService, + unprocessed_transaction_storage::UnprocessedTransactionStorage, + }, + bundle_stage::{ + bundle_account_locker::{BundleAccountLocker, LockedBundle}, + bundle_reserved_space_manager::BundleReservedSpaceManager, + bundle_stage_leader_metrics::BundleStageLeaderMetrics, + committer::Committer, + }, + consensus_cache_updater::ConsensusCacheUpdater, + immutable_deserialized_bundle::ImmutableDeserializedBundle, + proxy::block_engine_stage::BlockBuilderFeeInfo, + tip_manager::TipManager, + }, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_bundle::{ + bundle_execution::{load_and_execute_bundle, BundleExecutionMetrics}, + BundleExecutionError, BundleExecutionResult, TipError, + }, + 
solana_cost_model::transaction_cost::TransactionCost, + solana_gossip::cluster_info::ClusterInfo, + solana_measure::{measure, measure_us}, + solana_poh::poh_recorder::{BankStart, RecordTransactionsSummary, TransactionRecorder}, + solana_runtime::bank::Bank, + solana_sdk::{ + bundle::SanitizedBundle, + clock::{Slot, MAX_PROCESSING_AGE}, + feature_set, + pubkey::Pubkey, + transaction::{self}, + }, + std::{ + collections::HashSet, + sync::{Arc, Mutex}, + time::{Duration, Instant}, + }, +}; + +pub struct ExecuteRecordCommitResult { + commit_transaction_details: Vec, + result: BundleExecutionResult<()>, + execution_metrics: BundleExecutionMetrics, + execute_and_commit_timings: LeaderExecuteAndCommitTimings, + transaction_error_counter: TransactionErrorMetrics, +} + +pub struct BundleConsumer { + committer: Committer, + transaction_recorder: TransactionRecorder, + qos_service: QosService, + log_messages_bytes_limit: Option, + + consensus_cache_updater: ConsensusCacheUpdater, + + tip_manager: TipManager, + last_tip_update_slot: Slot, + + blacklisted_accounts: HashSet, + + // Manages account locks across multiple transactions within a bundle to prevent race conditions + // with BankingStage + bundle_account_locker: BundleAccountLocker, + + block_builder_fee_info: Arc>, + + max_bundle_retry_duration: Duration, + + cluster_info: Arc, + + reserved_space: BundleReservedSpaceManager, +} + +impl BundleConsumer { + #[allow(clippy::too_many_arguments)] + pub fn new( + committer: Committer, + transaction_recorder: TransactionRecorder, + qos_service: QosService, + log_messages_bytes_limit: Option, + tip_manager: TipManager, + bundle_account_locker: BundleAccountLocker, + block_builder_fee_info: Arc>, + max_bundle_retry_duration: Duration, + cluster_info: Arc, + reserved_space: BundleReservedSpaceManager, + ) -> Self { + Self { + committer, + transaction_recorder, + qos_service, + log_messages_bytes_limit, + consensus_cache_updater: ConsensusCacheUpdater::default(), + tip_manager, + // MAX because sending tips during slot 0 in tests doesn't work + last_tip_update_slot: u64::MAX, + blacklisted_accounts: HashSet::default(), + bundle_account_locker, + block_builder_fee_info, + max_bundle_retry_duration, + cluster_info, + reserved_space, + } + } + + // A bundle is a series of transactions to be executed sequentially, atomically, and all-or-nothing. + // Sequentially: + // - Transactions are executed in order + // Atomically: + // - All transactions in a bundle get recoded to PoH and committed to the bank in the same slot. Account locks + // for all accounts in all transactions in a bundle are held during the entire execution to remove POH record race conditions + // with transactions in BankingStage. + // All-or-nothing: + // - All transactions are committed or none. Modified state for the entire bundle isn't recorded to PoH and committed to the + // bank until all transactions in the bundle have executed. + // + // Some corner cases to be aware of when working with BundleStage: + // A bundle is not allowed to call the Tip Payment program in a bundle (or BankingStage). + // - This is to avoid stealing of tips by malicious parties with bundles that crank the tip + // payment program and set the tip receiver to themself. + // A bundle is not allowed to touch consensus-related accounts + // - This is to avoid stalling the voting BankingStage threads. 
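The consensus-account restriction described above reduces to a set-membership check over every account key in the bundle. A minimal sketch, with string keys standing in for Pubkeys (in the validator the equivalent check happens during bundle sanitization against the blacklisted_accounts set):

use std::collections::HashSet;

/// Returns true if any transaction in the bundle touches a blacklisted account.
/// Each transaction is represented as its list of account keys; plain strings
/// stand in for real Pubkeys.
fn bundle_touches_blacklist(bundle: &[Vec<&str>], blacklist: &HashSet<&str>) -> bool {
    bundle
        .iter()
        .any(|tx_accounts| tx_accounts.iter().any(|account| blacklist.contains(account)))
}

fn main() {
    // Tip payment program + consensus (vote) accounts are off-limits to bundles.
    let blacklist: HashSet<&str> = ["tip_payment_program", "vote_account_1"].into();
    let ok_bundle = vec![vec!["alice", "bob"]];
    let bad_bundle = vec![vec!["alice", "tip_payment_program"]];
    assert!(!bundle_touches_blacklist(&ok_bundle, &blacklist));
    assert!(bundle_touches_blacklist(&bad_bundle, &blacklist));
}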
+    pub fn consume_buffered_bundles(
+        &mut self,
+        bank_start: &BankStart,
+        unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
+        bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+    ) {
+        self.maybe_update_blacklist(bank_start);
+        self.reserved_space.tick(&bank_start.working_bank);
+
+        let reached_end_of_slot = unprocessed_transaction_storage.process_bundles(
+            bank_start.working_bank.clone(),
+            bundle_stage_leader_metrics,
+            &self.blacklisted_accounts,
+            |bundles, bundle_stage_leader_metrics| {
+                Self::do_process_bundles(
+                    &self.bundle_account_locker,
+                    &self.tip_manager,
+                    &mut self.last_tip_update_slot,
+                    &self.cluster_info,
+                    &self.block_builder_fee_info,
+                    &self.committer,
+                    &self.transaction_recorder,
+                    &self.qos_service,
+                    &self.log_messages_bytes_limit,
+                    self.max_bundle_retry_duration,
+                    &self.reserved_space,
+                    bundles,
+                    bank_start,
+                    bundle_stage_leader_metrics,
+                )
+            },
+        );
+
+        if reached_end_of_slot {
+            bundle_stage_leader_metrics
+                .leader_slot_metrics_tracker()
+                .set_end_of_slot_unprocessed_buffer_len(
+                    unprocessed_transaction_storage.len() as u64
+                );
+        }
+    }
+
+    /// The blacklist is updated with the tip payment program + any consensus accounts.
+    fn maybe_update_blacklist(&mut self, bank_start: &BankStart) {
+        if self
+            .consensus_cache_updater
+            .maybe_update(&bank_start.working_bank)
+        {
+            self.blacklisted_accounts = self
+                .consensus_cache_updater
+                .consensus_accounts_cache()
+                .union(&HashSet::from_iter([self
+                    .tip_manager
+                    .tip_payment_program_id()]))
+                .cloned()
+                .collect();
+
+            debug!(
+                "updated blacklist with {} accounts",
+                self.blacklisted_accounts.len()
+            );
+        }
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn do_process_bundles(
+        bundle_account_locker: &BundleAccountLocker,
+        tip_manager: &TipManager,
+        last_tip_updated_slot: &mut Slot,
+        cluster_info: &Arc<ClusterInfo>,
+        block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+        committer: &Committer,
+        recorder: &TransactionRecorder,
+        qos_service: &QosService,
+        log_messages_bytes_limit: &Option<usize>,
+        max_bundle_retry_duration: Duration,
+        reserved_space: &BundleReservedSpaceManager,
+        bundles: &[(ImmutableDeserializedBundle, SanitizedBundle)],
+        bank_start: &BankStart,
+        bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+    ) -> Vec<Result<(), BundleExecutionError>> {
+        // BundleAccountLocker holds RW locks for ALL accounts in ALL transactions within a single bundle.
+        // By pre-locking bundles before they're ready to be processed, it prevents BankingStage from
+        // grabbing those locks so BundleStage can process as fast as possible.
+        // A LockedBundle is similar to TransactionBatch; once it's dropped the locks are released.
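The drop-to-unlock behavior mentioned above is ordinary RAII. A stripped-down sketch of the pattern, with a single counter standing in for the real per-account read/write maps:

use std::sync::{Arc, Mutex};

/// Simplified locker: just counts outstanding locked bundles instead of
/// tracking per-account read/write counts like the real BundleAccountLocker.
#[derive(Default, Clone)]
struct Locker {
    locked_bundles: Arc<Mutex<usize>>,
}

/// Guard returned by `lock`; releasing happens automatically on drop,
/// which is what makes early returns and `?` safe in the consumer.
struct LockedBundleGuard {
    locker: Locker,
}

impl Locker {
    fn lock(&self) -> LockedBundleGuard {
        *self.locked_bundles.lock().unwrap() += 1;
        LockedBundleGuard {
            locker: self.clone(),
        }
    }
}

impl Drop for LockedBundleGuard {
    fn drop(&mut self) {
        *self.locker.locked_bundles.lock().unwrap() -= 1;
    }
}

fn main() {
    let locker = Locker::default();
    {
        let _guard = locker.lock();
        assert_eq!(*locker.locked_bundles.lock().unwrap(), 1);
    } // _guard dropped here; the lock is released even if the scope exited early
    assert_eq!(*locker.locked_bundles.lock().unwrap(), 0);
}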
+ #[allow(clippy::needless_collect)] + let (locked_bundle_results, locked_bundles_elapsed) = measure!( + bundles + .iter() + .map(|(_, sanitized_bundle)| { + bundle_account_locker + .prepare_locked_bundle(sanitized_bundle, &bank_start.working_bank) + }) + .collect::>(), + "locked_bundles_elapsed" + ); + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_locked_bundle_elapsed_us(locked_bundles_elapsed.as_us()); + + let (execution_results, execute_locked_bundles_elapsed) = measure!(locked_bundle_results + .into_iter() + .map(|r| match r { + Ok(locked_bundle) => { + let (r, measure) = measure_us!(Self::process_bundle( + bundle_account_locker, + tip_manager, + last_tip_updated_slot, + cluster_info, + block_builder_fee_info, + committer, + recorder, + qos_service, + log_messages_bytes_limit, + max_bundle_retry_duration, + reserved_space, + &locked_bundle, + bank_start, + bundle_stage_leader_metrics, + )); + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .increment_process_packets_transactions_us(measure); + r + } + Err(_) => { + Err(BundleExecutionError::LockError) + } + }) + .collect::>()); + + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_execute_locked_bundles_elapsed_us(execute_locked_bundles_elapsed.as_us()); + execution_results.iter().for_each(|result| { + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_bundle_execution_result(result); + }); + + execution_results + } + + #[allow(clippy::too_many_arguments)] + fn process_bundle( + bundle_account_locker: &BundleAccountLocker, + tip_manager: &TipManager, + last_tip_updated_slot: &mut Slot, + cluster_info: &Arc, + block_builder_fee_info: &Arc>, + committer: &Committer, + recorder: &TransactionRecorder, + qos_service: &QosService, + log_messages_bytes_limit: &Option, + max_bundle_retry_duration: Duration, + reserved_space: &BundleReservedSpaceManager, + locked_bundle: &LockedBundle, + bank_start: &BankStart, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + ) -> Result<(), BundleExecutionError> { + if !Bank::should_bank_still_be_processing_txs( + &bank_start.bank_creation_time, + bank_start.working_bank.ns_per_slot, + ) { + return Err(BundleExecutionError::BankProcessingTimeLimitReached); + } + + if Self::bundle_touches_tip_pdas( + locked_bundle.sanitized_bundle(), + &tip_manager.get_tip_accounts(), + ) && bank_start.working_bank.slot() != *last_tip_updated_slot + { + let start = Instant::now(); + let result = Self::handle_tip_programs( + bundle_account_locker, + tip_manager, + cluster_info, + block_builder_fee_info, + committer, + recorder, + qos_service, + log_messages_bytes_limit, + max_bundle_retry_duration, + reserved_space, + bank_start, + bundle_stage_leader_metrics, + ); + + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_change_tip_receiver_elapsed_us(start.elapsed().as_micros() as u64); + + result?; + + *last_tip_updated_slot = bank_start.working_bank.slot(); + } + + Self::update_qos_and_execute_record_commit_bundle( + committer, + recorder, + qos_service, + log_messages_bytes_limit, + max_bundle_retry_duration, + reserved_space, + locked_bundle.sanitized_bundle(), + bank_start, + bundle_stage_leader_metrics, + )?; + + Ok(()) + } + + /// The validator needs to manage state on two programs related to tips + #[allow(clippy::too_many_arguments)] + fn handle_tip_programs( + bundle_account_locker: &BundleAccountLocker, + tip_manager: &TipManager, + cluster_info: &Arc, + block_builder_fee_info: &Arc>, + 
committer: &Committer, + recorder: &TransactionRecorder, + qos_service: &QosService, + log_messages_bytes_limit: &Option, + max_bundle_retry_duration: Duration, + reserved_space: &BundleReservedSpaceManager, + bank_start: &BankStart, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + ) -> Result<(), BundleExecutionError> { + debug!("handle_tip_programs"); + + // This will setup the tip payment and tip distribution program if they haven't been + // initialized yet, which is typically helpful for local validators. On mainnet and testnet, + // this code should never run. + let keypair = cluster_info.keypair().clone(); + let initialize_tip_programs_bundle = + tip_manager.get_initialize_tip_programs_bundle(&bank_start.working_bank, &keypair); + if let Some(bundle) = initialize_tip_programs_bundle { + debug!( + "initializing tip programs with {} transactions, bundle id: {}", + bundle.transactions.len(), + bundle.bundle_id + ); + + let locked_init_tip_programs_bundle = bundle_account_locker + .prepare_locked_bundle(&bundle, &bank_start.working_bank) + .map_err(|_| BundleExecutionError::TipError(TipError::LockError))?; + + Self::update_qos_and_execute_record_commit_bundle( + committer, + recorder, + qos_service, + log_messages_bytes_limit, + max_bundle_retry_duration, + reserved_space, + locked_init_tip_programs_bundle.sanitized_bundle(), + bank_start, + bundle_stage_leader_metrics, + ) + .map_err(|e| { + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_num_init_tip_account_errors(1); + error!( + "bundle: {} error initializing tip programs: {:?}", + locked_init_tip_programs_bundle.sanitized_bundle().bundle_id, + e + ); + BundleExecutionError::TipError(TipError::InitializeProgramsError) + })?; + + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_num_init_tip_account_ok(1); + } + + // There are two frequently run internal cranks inside the jito-solana validator that have to do with managing MEV tips. + // One is initialize the TipDistributionAccount, which is a validator's "tip piggy bank" for an epoch + // The other is ensuring the tip_receiver is configured correctly to ensure tips are routed to the correct + // address. The validator must drain the tip accounts to the previous tip receiver before setting the tip receiver to + // themselves. 
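A small sketch of how these cranks are gated (visible in process_bundle above): run them only when a bundle actually touches a tip account, and at most once per slot. Toy types (u64 slots, string accounts) stand in for the validator's real ones:

use std::collections::HashSet;

/// Decides whether the tip cranks must run before executing a bundle: only
/// when the bundle touches a tip account and the cranks haven't already run
/// during the current slot.
fn needs_tip_crank(
    bundle_accounts: &[&str],
    tip_accounts: &HashSet<&str>,
    current_slot: u64,
    last_tip_update_slot: u64,
) -> bool {
    let touches_tips = bundle_accounts.iter().any(|a| tip_accounts.contains(a));
    touches_tips && current_slot != last_tip_update_slot
}

fn main() {
    let tip_accounts: HashSet<&str> = ["tip_pda_0"].into();
    // The first tipping bundle in slot 5 triggers the cranks (the consumer
    // starts with last_tip_update_slot = u64::MAX)...
    assert!(needs_tip_crank(
        &["payer", "tip_pda_0"],
        &tip_accounts,
        5,
        u64::MAX
    ));
    // ...later bundles in the same slot do not re-run them.
    assert!(!needs_tip_crank(&["payer", "tip_pda_0"], &tip_accounts, 5, 5));
    // Bundles that never touch a tip account never trigger the cranks.
    assert!(!needs_tip_crank(&["payer"], &tip_accounts, 5, u64::MAX));
}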
+ + let kp = cluster_info.keypair().clone(); + let tip_crank_bundle = tip_manager.get_tip_programs_crank_bundle( + &bank_start.working_bank, + &kp, + &block_builder_fee_info.lock().unwrap(), + )?; + debug!("tip_crank_bundle is_some: {}", tip_crank_bundle.is_some()); + + if let Some(bundle) = tip_crank_bundle { + info!( + "bundle id: {} cranking tip programs with {} transactions", + bundle.bundle_id, + bundle.transactions.len() + ); + + let locked_tip_crank_bundle = bundle_account_locker + .prepare_locked_bundle(&bundle, &bank_start.working_bank) + .map_err(|_| BundleExecutionError::TipError(TipError::LockError))?; + + Self::update_qos_and_execute_record_commit_bundle( + committer, + recorder, + qos_service, + log_messages_bytes_limit, + max_bundle_retry_duration, + reserved_space, + locked_tip_crank_bundle.sanitized_bundle(), + bank_start, + bundle_stage_leader_metrics, + ) + .map_err(|e| { + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_num_change_tip_receiver_errors(1); + error!( + "bundle: {} error cranking tip programs: {:?}", + locked_tip_crank_bundle.sanitized_bundle().bundle_id, + e + ); + BundleExecutionError::TipError(TipError::CrankTipError) + })?; + + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_num_change_tip_receiver_ok(1); + } + + Ok(()) + } + + /// Reserves space for the entire bundle up-front to ensure the entire bundle can execute. + /// Rolls back the reserved space if there's not enough blockspace for all transactions in the bundle. + fn reserve_bundle_blockspace( + qos_service: &QosService, + reserved_space: &BundleReservedSpaceManager, + sanitized_bundle: &SanitizedBundle, + bank: &Arc, + ) -> BundleExecutionResult<(Vec>, usize)> { + let mut write_cost_tracker = bank.write_cost_tracker().unwrap(); + + // set the block cost limit to the original block cost limit, run the select + accumulate + // then reset back to the expected block cost limit. 
this allows bundle stage to potentially + // increase block_compute_limits, allocate the space, and reset the block_cost_limits to + // the reserved space without BankingStage racing to allocate this extra reserved space + write_cost_tracker.set_block_cost_limit(reserved_space.block_cost_limit()); + let (transaction_qos_cost_results, cost_model_throttled_transactions_count) = qos_service + .select_and_accumulate_transaction_costs( + bank, + &mut write_cost_tracker, + &sanitized_bundle.transactions, + std::iter::repeat(Ok(())), + ); + write_cost_tracker.set_block_cost_limit(reserved_space.expected_block_cost_limits(bank)); + drop(write_cost_tracker); + + // rollback all transaction costs if it can't fit and + if transaction_qos_cost_results.iter().any(|c| c.is_err()) { + QosService::remove_costs(transaction_qos_cost_results.iter(), None, bank); + return Err(BundleExecutionError::ExceedsCostModel); + } + + Ok(( + transaction_qos_cost_results, + cost_model_throttled_transactions_count, + )) + } + + fn update_qos_and_execute_record_commit_bundle( + committer: &Committer, + recorder: &TransactionRecorder, + qos_service: &QosService, + log_messages_bytes_limit: &Option, + max_bundle_retry_duration: Duration, + reserved_space: &BundleReservedSpaceManager, + sanitized_bundle: &SanitizedBundle, + bank_start: &BankStart, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + ) -> BundleExecutionResult<()> { + debug!( + "bundle: {} reserving blockspace for {} transactions", + sanitized_bundle.bundle_id, + sanitized_bundle.transactions.len() + ); + + let ( + (transaction_qos_cost_results, _cost_model_throttled_transactions_count), + cost_model_elapsed_us, + ) = measure_us!(Self::reserve_bundle_blockspace( + qos_service, + reserved_space, + sanitized_bundle, + &bank_start.working_bank + )?); + + debug!( + "bundle: {} executing, recording, and committing", + sanitized_bundle.bundle_id + ); + + let (result, process_transactions_us) = measure_us!(Self::execute_record_commit_bundle( + committer, + recorder, + log_messages_bytes_limit, + max_bundle_retry_duration, + sanitized_bundle, + bank_start, + )); + + bundle_stage_leader_metrics + .bundle_stage_metrics_tracker() + .increment_num_execution_retries(result.execution_metrics.num_retries); + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .accumulate_transaction_errors(&result.transaction_error_counter); + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .increment_process_transactions_us(process_transactions_us); + + let (cu, us) = result + .execute_and_commit_timings + .execute_timings + .accumulate_execute_units_and_time(); + qos_service.accumulate_actual_execute_cu(cu); + qos_service.accumulate_actual_execute_time(us); + + let num_committed = result + .commit_transaction_details + .iter() + .filter(|c| matches!(c, CommitTransactionDetails::Committed { .. 
})) + .count(); + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .accumulate_process_transactions_summary(&ProcessTransactionsSummary { + reached_max_poh_height: matches!( + result.result, + Err(BundleExecutionError::BankProcessingTimeLimitReached) + | Err(BundleExecutionError::PohRecordError(_)) + ), + transactions_attempted_execution_count: sanitized_bundle.transactions.len(), + committed_transactions_count: num_committed, + // NOTE: this assumes that bundles are committed all-or-nothing + committed_transactions_with_successful_result_count: num_committed, + failed_commit_count: 0, + retryable_transaction_indexes: vec![], + cost_model_throttled_transactions_count: 0, + cost_model_us: cost_model_elapsed_us, + execute_and_commit_timings: result.execute_and_commit_timings, + error_counters: result.transaction_error_counter, + }); + + match result.result { + Ok(_) => { + // it's assumed that all transactions in the bundle executed, can update QoS + if !bank_start + .working_bank + .feature_set + .is_active(&feature_set::apply_cost_tracker_during_replay::id()) + { + QosService::update_costs( + transaction_qos_cost_results.iter(), + Some(&result.commit_transaction_details), + &bank_start.working_bank, + ); + } + + qos_service.report_metrics(bank_start.working_bank.slot()); + Ok(()) + } + Err(e) => { + // on bundle failure, none of the transactions are committed, so need to revert + // all compute reserved + QosService::remove_costs( + transaction_qos_cost_results.iter(), + None, + &bank_start.working_bank, + ); + qos_service.report_metrics(bank_start.working_bank.slot()); + + Err(e) + } + } + } + + fn execute_record_commit_bundle( + committer: &Committer, + recorder: &TransactionRecorder, + log_messages_bytes_limit: &Option, + max_bundle_retry_duration: Duration, + sanitized_bundle: &SanitizedBundle, + bank_start: &BankStart, + ) -> ExecuteRecordCommitResult { + let transaction_status_sender_enabled = committer.transaction_status_sender_enabled(); + + let mut execute_and_commit_timings = LeaderExecuteAndCommitTimings::default(); + + debug!("bundle: {} executing", sanitized_bundle.bundle_id); + let default_accounts = vec![None; sanitized_bundle.transactions.len()]; + let mut bundle_execution_results = load_and_execute_bundle( + &bank_start.working_bank, + sanitized_bundle, + MAX_PROCESSING_AGE, + &max_bundle_retry_duration, + transaction_status_sender_enabled, + transaction_status_sender_enabled, + transaction_status_sender_enabled, + transaction_status_sender_enabled, + log_messages_bytes_limit, + false, + None, + &default_accounts, + &default_accounts, + ); + + let execution_metrics = bundle_execution_results.metrics(); + + execute_and_commit_timings.collect_balances_us = execution_metrics.collect_balances_us; + execute_and_commit_timings.load_execute_us = execution_metrics.load_execute_us; + execute_and_commit_timings + .execute_timings + .accumulate(&execution_metrics.execute_timings); + + let mut transaction_error_counter = TransactionErrorMetrics::default(); + bundle_execution_results + .bundle_transaction_results() + .iter() + .for_each(|r| { + transaction_error_counter + .accumulate(&r.load_and_execute_transactions_output().error_counters); + }); + + debug!( + "bundle: {} executed, is_ok: {}", + sanitized_bundle.bundle_id, + bundle_execution_results.result().is_ok() + ); + + // don't commit bundle if failure executing any part of the bundle + if let Err(e) = bundle_execution_results.result() { + return ExecuteRecordCommitResult { + commit_transaction_details: vec![], + 
result: Err(e.clone().into()), + execution_metrics, + execute_and_commit_timings, + transaction_error_counter, + }; + } + + let (executed_batches, execution_results_to_transactions_us) = + measure_us!(bundle_execution_results.executed_transaction_batches()); + + debug!( + "bundle: {} recording {} batches of {:?} transactions", + sanitized_bundle.bundle_id, + executed_batches.len(), + executed_batches + .iter() + .map(|b| b.len()) + .collect::>() + ); + + let (freeze_lock, freeze_lock_us) = measure_us!(bank_start.working_bank.freeze_lock()); + execute_and_commit_timings.freeze_lock_us = freeze_lock_us; + + let (last_blockhash, lamports_per_signature) = bank_start + .working_bank + .last_blockhash_and_lamports_per_signature(); + + let ( + RecordTransactionsSummary { + result: record_transactions_result, + record_transactions_timings, + starting_transaction_index, + }, + record_us, + ) = measure_us!( + recorder.record_transactions(bank_start.working_bank.slot(), executed_batches) + ); + + execute_and_commit_timings.record_us = record_us; + execute_and_commit_timings.record_transactions_timings = record_transactions_timings; + execute_and_commit_timings + .record_transactions_timings + .execution_results_to_transactions_us = execution_results_to_transactions_us; + + debug!( + "bundle: {} record result: {}", + sanitized_bundle.bundle_id, + record_transactions_result.is_ok() + ); + + // don't commit bundle if failed to record + if let Err(e) = record_transactions_result { + return ExecuteRecordCommitResult { + commit_transaction_details: vec![], + result: Err(e.into()), + execution_metrics, + execute_and_commit_timings, + transaction_error_counter, + }; + } + + // note: execute_and_commit_timings.commit_us handled inside this function + let (commit_us, commit_bundle_details) = committer.commit_bundle( + &mut bundle_execution_results, + last_blockhash, + lamports_per_signature, + starting_transaction_index, + &bank_start.working_bank, + &mut execute_and_commit_timings, + ); + execute_and_commit_timings.commit_us = commit_us; + + drop(freeze_lock); + + // commit_bundle_details contains transactions that were and were not committed + // given the current implementation only executes, records, and commits bundles + // where all transactions executed, we can filter out the non-committed + // TODO (LB): does this make more sense in commit_bundle for future when failing bundles are accepted? + let commit_transaction_details = commit_bundle_details + .commit_transaction_details + .into_iter() + .flat_map(|commit_details| { + commit_details + .into_iter() + .filter(|d| matches!(d, CommitTransactionDetails::Committed { .. 
})) + }) + .collect(); + debug!( + "bundle: {} commit details: {:?}", + sanitized_bundle.bundle_id, commit_transaction_details + ); + + ExecuteRecordCommitResult { + commit_transaction_details, + result: Ok(()), + execution_metrics, + execute_and_commit_timings, + transaction_error_counter, + } + } + + /// Returns true if any of the transactions in a bundle mention one of the tip PDAs + fn bundle_touches_tip_pdas(bundle: &SanitizedBundle, tip_pdas: &HashSet) -> bool { + bundle.transactions.iter().any(|tx| { + tx.message() + .account_keys() + .iter() + .any(|a| tip_pdas.contains(a)) + }) + } +} + +#[cfg(test)] +mod tests { + use { + crate::{ + bundle_stage::{ + bundle_account_locker::BundleAccountLocker, bundle_consumer::BundleConsumer, + bundle_packet_deserializer::BundlePacketDeserializer, + bundle_reserved_space_manager::BundleReservedSpaceManager, + bundle_stage_leader_metrics::BundleStageLeaderMetrics, committer::Committer, + QosService, UnprocessedTransactionStorage, + }, + packet_bundle::PacketBundle, + proxy::block_engine_stage::BlockBuilderFeeInfo, + tip_manager::{TipDistributionAccountConfig, TipManager, TipManagerConfig}, + }, + crossbeam_channel::{unbounded, Receiver}, + jito_tip_distribution::sdk::derive_tip_distribution_account_address, + rand::{thread_rng, RngCore}, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_cost_model::{block_cost_limits::MAX_BLOCK_UNITS, cost_model::CostModel}, + solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, + solana_ledger::{ + blockstore::Blockstore, genesis_utils::create_genesis_config, + get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, + }, + solana_perf::packet::PacketBatch, + solana_poh::{ + poh_recorder::{PohRecorder, Record, WorkingBankEntry}, + poh_service::PohService, + }, + solana_program_test::programs::spl_programs, + solana_runtime::{ + bank::Bank, + genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo}, + prioritization_fee_cache::PrioritizationFeeCache, + }, + solana_sdk::{ + bundle::{derive_bundle_id, SanitizedBundle}, + clock::MAX_PROCESSING_AGE, + feature_set::delay_visibility_of_program_deployment, + fee_calculator::{FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE}, + genesis_config::ClusterType, + hash::Hash, + native_token::sol_to_lamports, + packet::Packet, + poh_config::PohConfig, + pubkey::Pubkey, + rent::Rent, + signature::{Keypair, Signer}, + system_transaction::transfer, + transaction::{SanitizedTransaction, TransactionError, VersionedTransaction}, + vote::state::VoteState, + }, + solana_streamer::socket::SocketAddrSpace, + std::{ + collections::{HashSet, VecDeque}, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, RwLock, + }, + thread::{Builder, JoinHandle}, + time::Duration, + }, + }; + + struct TestFixture { + genesis_config_info: GenesisConfigInfo, + leader_keypair: Keypair, + bank: Arc, + exit: Arc, + poh_recorder: Arc>, + poh_simulator: JoinHandle<()>, + entry_receiver: Receiver, + } + + pub(crate) fn simulate_poh( + record_receiver: Receiver, + poh_recorder: &Arc>, + ) -> JoinHandle<()> { + let poh_recorder = poh_recorder.clone(); + let is_exited = poh_recorder.read().unwrap().is_exited.clone(); + let tick_producer = Builder::new() + .name("solana-simulate_poh".to_string()) + .spawn(move || loop { + PohService::read_record_receiver_and_process( + &poh_recorder, + &record_receiver, + Duration::from_millis(10), + ); + if is_exited.load(Ordering::Relaxed) { + break; + } + }); + 
tick_producer.unwrap() + } + + pub fn create_test_recorder( + bank: &Arc, + blockstore: Arc, + poh_config: Option, + leader_schedule_cache: Option>, + ) -> ( + Arc, + Arc>, + JoinHandle<()>, + Receiver, + ) { + let leader_schedule_cache = match leader_schedule_cache { + Some(provided_cache) => provided_cache, + None => Arc::new(LeaderScheduleCache::new_from_bank(bank)), + }; + let exit = Arc::new(AtomicBool::new(false)); + let poh_config = poh_config.unwrap_or_default(); + let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( + bank.tick_height(), + bank.last_blockhash(), + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::default(), + blockstore, + &leader_schedule_cache, + &poh_config, + exit.clone(), + ); + poh_recorder.set_bank(bank.clone(), false); + + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); + let poh_simulator = simulate_poh(record_receiver, &poh_recorder); + + (exit, poh_recorder, poh_simulator, entry_receiver) + } + + fn create_test_fixture(mint_sol: u64) -> TestFixture { + let mint_keypair = Keypair::new(); + let leader_keypair = Keypair::new(); + let voting_keypair = Keypair::new(); + + let rent = Rent::default(); + + let mut genesis_config = create_genesis_config_with_leader_ex( + sol_to_lamports(mint_sol as f64), + &mint_keypair.pubkey(), + &leader_keypair.pubkey(), + &voting_keypair.pubkey(), + &solana_sdk::pubkey::new_rand(), + rent.minimum_balance(VoteState::size_of()) + sol_to_lamports(1_000_000.0), + sol_to_lamports(1_000_000.0), + FeeRateGovernor { + // Initialize with a non-zero fee + lamports_per_signature: DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE / 2, + ..FeeRateGovernor::default() + }, + rent, // most tests don't expect rent + ClusterType::Development, + spl_programs(&rent), + ); + genesis_config.ticks_per_slot *= 8; + + // workaround for https://github.com/solana-labs/solana/issues/30085 + // the test can deploy and use spl_programs in the genensis slot without waiting for the next one + let mut bank = Bank::new_for_tests(&genesis_config); + bank.deactivate_feature(&delay_visibility_of_program_deployment::id()); + let bank = Arc::new(bank); + + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new( + Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"), + ); + + let (exit, poh_recorder, poh_simulator, entry_receiver) = + create_test_recorder(&bank, blockstore, Some(PohConfig::default()), None); + + let validator_pubkey = voting_keypair.pubkey(); + TestFixture { + genesis_config_info: GenesisConfigInfo { + genesis_config, + mint_keypair, + voting_keypair, + validator_pubkey, + }, + leader_keypair, + bank, + exit, + poh_recorder, + poh_simulator, + entry_receiver, + } + } + + fn make_random_overlapping_bundles( + mint_keypair: &Keypair, + num_bundles: usize, + num_packets_per_bundle: usize, + hash: Hash, + max_transfer_amount: u64, + ) -> Vec { + let mut rng = thread_rng(); + + (0..num_bundles) + .map(|_| { + let transfers: Vec<_> = (0..num_packets_per_bundle) + .map(|_| { + VersionedTransaction::from(transfer( + mint_keypair, + &mint_keypair.pubkey(), + rng.next_u64() % max_transfer_amount, + hash, + )) + }) + .collect(); + let bundle_id = derive_bundle_id(&transfers); + + PacketBundle { + batch: PacketBatch::new( + transfers + .iter() + .map(|tx| Packet::from_data(None, tx).unwrap()) + .collect(), + ), + bundle_id, + } + }) + .collect() + } + + fn get_tip_manager(vote_account: &Pubkey) -> TipManager { + TipManager::new(TipManagerConfig { + 
tip_payment_program_id: Pubkey::from_str("T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt") + .unwrap(), + tip_distribution_program_id: Pubkey::from_str( + "4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7", + ) + .unwrap(), + tip_distribution_account_config: TipDistributionAccountConfig { + merkle_root_upload_authority: Pubkey::new_unique(), + vote_account: *vote_account, + commission_bps: 10, + }, + }) + } + + /// Happy-path bundle execution w/ no tip management + #[test] + fn test_bundle_no_tip_success() { + solana_logger::setup(); + let TestFixture { + genesis_config_info, + leader_keypair, + bank, + exit, + poh_recorder, + poh_simulator, + entry_receiver, + } = create_test_fixture(1_000_000); + let recorder = poh_recorder.read().unwrap().new_recorder(); + + let status = poh_recorder.read().unwrap().reached_leader_slot(); + info!("status: {:?}", status); + + let (replay_vote_sender, _replay_vote_receiver) = unbounded(); + let committer = Committer::new( + None, + replay_vote_sender, + Arc::new(PrioritizationFeeCache::new(0u64)), + ); + + let block_builder_pubkey = Pubkey::new_unique(); + let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey()); + let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo { + block_builder: block_builder_pubkey, + block_builder_commission: 10, + })); + + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::new(leader_keypair.pubkey(), 0, 0), + Arc::new(leader_keypair), + SocketAddrSpace::new(true), + )); + + let mut consumer = BundleConsumer::new( + committer, + recorder, + QosService::new(1), + None, + tip_manager, + BundleAccountLocker::default(), + block_builder_info, + Duration::from_secs(10), + cluster_info, + BundleReservedSpaceManager::new( + MAX_BLOCK_UNITS, + 3_000_000, + poh_recorder + .read() + .unwrap() + .ticks_per_slot() + .saturating_mul(8) + .saturating_div(10), + ), + ); + + let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); + + let mut bundle_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1); + + let mut packet_bundles = make_random_overlapping_bundles( + &genesis_config_info.mint_keypair, + 1, + 3, + genesis_config_info.genesis_config.hash(), + 10_000, + ); + let deserialized_bundle = BundlePacketDeserializer::deserialize_bundle( + packet_bundles.get_mut(0).unwrap(), + false, + None, + ) + .unwrap(); + let mut error_metrics = TransactionErrorMetrics::default(); + let sanitized_bundle = deserialized_bundle + .build_sanitized_bundle( + &bank_start.working_bank, + &HashSet::default(), + &mut error_metrics, + ) + .unwrap(); + + let summary = bundle_storage.insert_bundles(vec![deserialized_bundle]); + assert_eq!( + summary.num_packets_inserted, + sanitized_bundle.transactions.len() + ); + assert_eq!(summary.num_bundles_dropped, 0); + assert_eq!(summary.num_bundles_inserted, 1); + + consumer.consume_buffered_bundles( + &bank_start, + &mut bundle_storage, + &mut bundle_stage_leader_metrics, + ); + + let mut transactions = Vec::new(); + while let Ok(WorkingBankEntry { + bank: wbe_bank, + entries_ticks, + }) = entry_receiver.recv() + { + assert_eq!(bank.slot(), wbe_bank.slot()); + for (entry, _) in entries_ticks { + if !entry.transactions.is_empty() { + // transactions in this test are all overlapping, so each entry will contain 1 transaction + assert_eq!(entry.transactions.len(), 1); + transactions.extend(entry.transactions); + } + } + if 
transactions.len() == sanitized_bundle.transactions.len() { + break; + } + } + + let bundle_versioned_transactions: Vec<_> = sanitized_bundle + .transactions + .iter() + .map(|tx| tx.to_versioned_transaction()) + .collect(); + assert_eq!(transactions, bundle_versioned_transactions); + + let check_results = bank.check_transactions( + &sanitized_bundle.transactions, + &vec![Ok(()); sanitized_bundle.transactions.len()], + MAX_PROCESSING_AGE, + &mut error_metrics, + ); + assert_eq!( + check_results, + vec![ + (Err(TransactionError::AlreadyProcessed), None); + sanitized_bundle.transactions.len() + ] + ); + + poh_recorder + .write() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + exit.store(true, Ordering::Relaxed); + poh_simulator.join().unwrap(); + // TODO (LB): cleanup blockstore + } + + /// Happy-path bundle execution to ensure tip management works. + /// Tip management involves cranking setup bundles before executing the test bundle + #[test] + fn test_bundle_tip_program_setup_success() { + solana_logger::setup(); + let TestFixture { + genesis_config_info, + leader_keypair, + bank, + exit, + poh_recorder, + poh_simulator, + entry_receiver, + } = create_test_fixture(1_000_000); + let recorder = poh_recorder.read().unwrap().new_recorder(); + + let (replay_vote_sender, _replay_vote_receiver) = unbounded(); + let committer = Committer::new( + None, + replay_vote_sender, + Arc::new(PrioritizationFeeCache::new(0u64)), + ); + + let block_builder_pubkey = Pubkey::new_unique(); + let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey()); + let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo { + block_builder: block_builder_pubkey, + block_builder_commission: 10, + })); + + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::new(leader_keypair.pubkey(), 0, 0), + Arc::new(leader_keypair), + SocketAddrSpace::new(true), + )); + + let mut consumer = BundleConsumer::new( + committer, + recorder, + QosService::new(1), + None, + tip_manager.clone(), + BundleAccountLocker::default(), + block_builder_info, + Duration::from_secs(10), + cluster_info.clone(), + BundleReservedSpaceManager::new( + MAX_BLOCK_UNITS, + 3_000_000, + poh_recorder + .read() + .unwrap() + .ticks_per_slot() + .saturating_mul(8) + .saturating_div(10), + ), + ); + + let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); + + let mut bundle_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1); + // MAIN LOGIC + + // a bundle that tips the tip program + let tip_accounts = tip_manager.get_tip_accounts(); + let tip_account = tip_accounts.iter().collect::>()[0]; + let mut packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data( + None, + transfer( + &genesis_config_info.mint_keypair, + tip_account, + 1, + genesis_config_info.genesis_config.hash(), + ), + ) + .unwrap()]), + bundle_id: "test_transfer".to_string(), + }; + + let deserialized_bundle = + BundlePacketDeserializer::deserialize_bundle(&mut packet_bundle, false, None).unwrap(); + let mut error_metrics = TransactionErrorMetrics::default(); + let sanitized_bundle = deserialized_bundle + .build_sanitized_bundle( + &bank_start.working_bank, + &HashSet::default(), + &mut error_metrics, + ) + .unwrap(); + + let summary = bundle_storage.insert_bundles(vec![deserialized_bundle]); + assert_eq!(summary.num_bundles_inserted, 1); + 
assert_eq!(summary.num_packets_inserted, 1); + assert_eq!(summary.num_bundles_dropped, 0); + + consumer.consume_buffered_bundles( + &bank_start, + &mut bundle_storage, + &mut bundle_stage_leader_metrics, + ); + + // it's expected there are 5 transactions: three to initialize the tip payment program, the tip distribution config, + // and the tip distribution account, one to change the tip receiver + block builder, and the tipping transfer itself + + let mut transactions = Vec::new(); + while let Ok(WorkingBankEntry { + bank: wbe_bank, + entries_ticks, + }) = entry_receiver.recv() + { + assert_eq!(bank.slot(), wbe_bank.slot()); + transactions.extend(entries_ticks.into_iter().flat_map(|(e, _)| e.transactions)); + if transactions.len() == 5 { + break; + } + } + + // tip management on the first bundle involves: + // calling initialize on the tip payment and tip distribution programs + // creating the tip distribution account for this validator's epoch (the MEV piggy bank) + // changing the tip receiver and block builder tx + // the original transfer that was sent + let keypair = cluster_info.keypair().clone(); + + assert_eq!( + transactions[0], + tip_manager + .initialize_tip_payment_program_tx(bank.last_blockhash(), &keypair) + .to_versioned_transaction() + ); + assert_eq!( + transactions[1], + tip_manager + .initialize_tip_distribution_config_tx(bank.last_blockhash(), &keypair) + .to_versioned_transaction() + ); + assert_eq!( + transactions[2], + tip_manager + .initialize_tip_distribution_account_tx( + bank.last_blockhash(), + bank.epoch(), + &keypair + ) + .to_versioned_transaction() + ); + // the first tip receiver + block builder are the initializer (keypair.pubkey()) as set by the + // TipPayment program during initialization + assert_eq!( + transactions[3], + tip_manager + .build_change_tip_receiver_and_block_builder_tx( + &keypair.pubkey(), + &derive_tip_distribution_account_address( + &tip_manager.tip_distribution_program_id(), + &genesis_config_info.validator_pubkey, + bank_start.working_bank.epoch() + ) + .0, + &bank_start.working_bank, + &keypair, + &keypair.pubkey(), + &block_builder_pubkey, + 10 + ) + .to_versioned_transaction() + ); + assert_eq!( + transactions[4], + sanitized_bundle.transactions[0].to_versioned_transaction() + ); + + poh_recorder + .write() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + exit.store(true, Ordering::Relaxed); + poh_simulator.join().unwrap(); + } + + #[test] + fn test_handle_tip_programs() { + solana_logger::setup(); + let TestFixture { + genesis_config_info, + leader_keypair, + bank, + exit, + poh_recorder, + poh_simulator, + entry_receiver, + } = create_test_fixture(1_000_000); + let recorder = poh_recorder.read().unwrap().new_recorder(); + + let (replay_vote_sender, _replay_vote_receiver) = unbounded(); + let committer = Committer::new( + None, + replay_vote_sender, + Arc::new(PrioritizationFeeCache::new(0u64)), + ); + + let block_builder_pubkey = Pubkey::new_unique(); + let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey()); + let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo { + block_builder: block_builder_pubkey, + block_builder_commission: 10, + })); + + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::new(leader_keypair.pubkey(), 0, 0), + Arc::new(leader_keypair), + SocketAddrSpace::new(true), + )); + + let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); + + let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10); + + // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute 
units. + // The last 20% has full compute so blockspace is maximized if BundleStage is idle. + let reserved_space = + BundleReservedSpaceManager::new(MAX_BLOCK_UNITS, 3_000_000, reserved_ticks); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1); + assert_matches!( + BundleConsumer::handle_tip_programs( + &BundleAccountLocker::default(), + &tip_manager, + &cluster_info, + &block_builder_info, + &committer, + &recorder, + &QosService::new(1), + &None, + Duration::from_secs(10), + &reserved_space, + &bank_start, + &mut bundle_stage_leader_metrics + ), + Ok(()) + ); + + let mut transactions = Vec::new(); + while let Ok(WorkingBankEntry { + bank: wbe_bank, + entries_ticks, + }) = entry_receiver.recv() + { + assert_eq!(bank.slot(), wbe_bank.slot()); + transactions.extend(entries_ticks.into_iter().flat_map(|(e, _)| e.transactions)); + if transactions.len() == 4 { + break; + } + } + + let keypair = cluster_info.keypair().clone(); + // expect to see initialize tip payment program, tip distribution program, initialize tip distribution account, change tip receiver + change block builder + assert_eq!( + transactions[0], + tip_manager + .initialize_tip_payment_program_tx(bank.last_blockhash(), &keypair) + .to_versioned_transaction() + ); + assert_eq!( + transactions[1], + tip_manager + .initialize_tip_distribution_config_tx(bank.last_blockhash(), &keypair) + .to_versioned_transaction() + ); + assert_eq!( + transactions[2], + tip_manager + .initialize_tip_distribution_account_tx( + bank.last_blockhash(), + bank.epoch(), + &keypair + ) + .to_versioned_transaction() + ); + // the first tip receiver + block builder are the initializer (keypair.pubkey()) as set by the + // TipPayment program during initialization + assert_eq!( + transactions[3], + tip_manager + .build_change_tip_receiver_and_block_builder_tx( + &keypair.pubkey(), + &derive_tip_distribution_account_address( + &tip_manager.tip_distribution_program_id(), + &genesis_config_info.validator_pubkey, + bank_start.working_bank.epoch() + ) + .0, + &bank_start.working_bank, + &keypair, + &keypair.pubkey(), + &block_builder_pubkey, + 10 + ) + .to_versioned_transaction() + ); + + poh_recorder + .write() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + exit.store(true, Ordering::Relaxed); + poh_simulator.join().unwrap(); + } + + #[test] + fn test_reserve_bundle_blockspace_success() { + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + let transfer_tx = SanitizedTransaction::from_transaction_for_tests(transfer( + &keypair1, + &keypair2.pubkey(), + 1, + bank.parent_hash(), + )); + let sanitized_bundle = SanitizedBundle { + transactions: vec![transfer_tx], + bundle_id: String::default(), + }; + + let transfer_cost = + CostModel::calculate_cost(&sanitized_bundle.transactions[0], &bank.feature_set); + + let qos_service = QosService::new(1); + let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10); + + // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units. + // The last 20% has full compute so blockspace is maximized if BundleStage is idle. 
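+ // e.g. with the default 64 ticks per slot here, reserved_ticks = 64 * 8 / 10 = 51: ticks 0..51 + // enforce MAX_BLOCK_UNITS - 3_000_000 CUs, and ticks 51..64 restore the full MAX_BLOCK_UNITS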
+ let reserved_space = + BundleReservedSpaceManager::new(MAX_BLOCK_UNITS, 3_000_000, reserved_ticks); + + assert!(BundleConsumer::reserve_bundle_blockspace( + &qos_service, + &reserved_space, + &sanitized_bundle, + &bank + ) + .is_ok()); + assert_eq!( + bank.read_cost_tracker().unwrap().block_cost(), + transfer_cost.sum() + ); + } + + #[test] + fn test_reserve_bundle_blockspace_failure() { + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + let transfer_tx1 = SanitizedTransaction::from_transaction_for_tests(transfer( + &keypair1, + &keypair2.pubkey(), + 1, + bank.parent_hash(), + )); + let transfer_tx2 = SanitizedTransaction::from_transaction_for_tests(transfer( + &keypair1, + &keypair2.pubkey(), + 2, + bank.parent_hash(), + )); + let sanitized_bundle = SanitizedBundle { + transactions: vec![transfer_tx1, transfer_tx2], + bundle_id: String::default(), + }; + + // set block cost limit to 1 transfer transaction, try to process 2, should return an error + // and rollback block cost added + let transfer_cost = + CostModel::calculate_cost(&sanitized_bundle.transactions[0], &bank.feature_set); + bank.write_cost_tracker() + .unwrap() + .set_block_cost_limit(transfer_cost.sum()); + + let qos_service = QosService::new(1); + let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10); + + // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units. + // The last 20% has full compute so blockspace is maximized if BundleStage is idle. + let reserved_space = BundleReservedSpaceManager::new( + bank.read_cost_tracker().unwrap().block_cost(), + 50, + reserved_ticks, + ); + + assert!(BundleConsumer::reserve_bundle_blockspace( + &qos_service, + &reserved_space, + &sanitized_bundle, + &bank + ) + .is_err()); + assert_eq!(bank.read_cost_tracker().unwrap().block_cost(), 0); + assert_eq!( + bank.read_cost_tracker().unwrap().block_cost_limit(), + bank.read_cost_tracker() + .unwrap() + .block_cost_limit() + .saturating_sub(50) + ); + } +} diff --git a/core/src/bundle_stage/bundle_packet_deserializer.rs b/core/src/bundle_stage/bundle_packet_deserializer.rs new file mode 100644 index 0000000000..02fc9ff6a1 --- /dev/null +++ b/core/src/bundle_stage/bundle_packet_deserializer.rs @@ -0,0 +1,284 @@ +//! Deserializes PacketBundles +use { + crate::{ + immutable_deserialized_bundle::{DeserializedBundleError, ImmutableDeserializedBundle}, + packet_bundle::PacketBundle, + }, + crossbeam_channel::{Receiver, RecvTimeoutError}, + solana_runtime::bank_forks::BankForks, + solana_sdk::saturating_add_assign, + std::{ + sync::{Arc, RwLock}, + time::{Duration, Instant}, + }, +}; + +/// Results from deserializing packet batches. 
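+/// Bundles that fail deserialization are dropped and surface only in the dropped counts below.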
+#[derive(Debug)] +pub struct ReceiveBundleResults { + /// Deserialized bundles from all received bundle packets + pub deserialized_bundles: Vec<ImmutableDeserializedBundle>, + /// Number of dropped bundles + pub num_dropped_bundles: usize, + /// Number of dropped packets + pub num_dropped_packets: usize, +} + +pub struct BundlePacketDeserializer { + /// Receiver for bundle packets + bundle_packet_receiver: Receiver<Vec<PacketBundle>>, + /// Provides working bank for deserializer to check feature activation + bank_forks: Arc<RwLock<BankForks>>, + /// Max packets per bundle + max_packets_per_bundle: Option<usize>, +} + +impl BundlePacketDeserializer { + pub fn new( + bundle_packet_receiver: Receiver<Vec<PacketBundle>>, + bank_forks: Arc<RwLock<BankForks>>, + max_packets_per_bundle: Option<usize>, + ) -> Self { + Self { + bundle_packet_receiver, + bank_forks, + max_packets_per_bundle, + } + } + + /// Handles receiving bundles and deserializing them + pub fn receive_bundles( + &self, + recv_timeout: Duration, + capacity: usize, + ) -> Result<ReceiveBundleResults, RecvTimeoutError> { + let (bundle_count, _packet_count, mut bundles) = + self.receive_until(recv_timeout, capacity)?; + + // Note: this can be removed after feature `round_compute_unit_price` is activated in + // mainnet-beta + let _working_bank = self.bank_forks.read().unwrap().working_bank(); + let round_compute_unit_price_enabled = false; // TODO get from working_bank.feature_set + + Ok(Self::deserialize_and_collect_bundles( + bundle_count, + &mut bundles, + round_compute_unit_price_enabled, + self.max_packets_per_bundle, + )) + } + + /// Deserializes packet batches and collects them into ReceiveBundleResults + fn deserialize_and_collect_bundles( + bundle_count: usize, + bundles: &mut [PacketBundle], + round_compute_unit_price_enabled: bool, + max_packets_per_bundle: Option<usize>, + ) -> ReceiveBundleResults { + let mut deserialized_bundles = Vec::with_capacity(bundle_count); + let mut num_dropped_bundles: usize = 0; + let mut num_dropped_packets: usize = 0; + + for bundle in bundles.iter_mut() { + match Self::deserialize_bundle( + bundle, + round_compute_unit_price_enabled, + max_packets_per_bundle, + ) { + Ok(deserialized_bundle) => { + deserialized_bundles.push(deserialized_bundle); + } + Err(_) => { + // TODO (LB): prob wanna collect stats here + saturating_add_assign!(num_dropped_bundles, 1); + saturating_add_assign!(num_dropped_packets, bundle.batch.len()); + } + } + } + + ReceiveBundleResults { + deserialized_bundles, + num_dropped_bundles, + num_dropped_packets, + } + } + + /// Receives bundle packets + fn receive_until( + &self, + recv_timeout: Duration, + bundle_count_upperbound: usize, + ) -> Result<(usize, usize, Vec<PacketBundle>), RecvTimeoutError> { + let start = Instant::now(); + + let mut bundles = self.bundle_packet_receiver.recv_timeout(recv_timeout)?; + let mut num_packets_received: usize = bundles.iter().map(|pb| pb.batch.len()).sum(); + let mut num_bundles_received: usize = bundles.len(); + + if num_bundles_received <= bundle_count_upperbound { + while let Ok(bundle_packets) = self.bundle_packet_receiver.try_recv() { + trace!("got more packet batches in bundle packet deserializer"); + + saturating_add_assign!( + num_packets_received, + bundle_packets + .iter() + .map(|pb| pb.batch.len()) + .sum::<usize>() + ); + saturating_add_assign!(num_bundles_received, bundle_packets.len()); + + bundles.extend(bundle_packets); + + if start.elapsed() >= recv_timeout + || num_bundles_received >= bundle_count_upperbound + { + break; + } + } + } + + Ok((num_bundles_received, num_packets_received, bundles)) + } + + /// Deserializes the Bundle into 
an `ImmutableDeserializedBundle`, returning an error if any packet in the + /// bundle fails to deserialize + pub fn deserialize_bundle( + bundle: &mut PacketBundle, + round_compute_unit_price_enabled: bool, + max_packets_per_bundle: Option<usize>, + ) -> Result<ImmutableDeserializedBundle, DeserializedBundleError> { + bundle.batch.iter_mut().for_each(|p| { + p.meta_mut() + .set_round_compute_unit_price(round_compute_unit_price_enabled); + }); + + ImmutableDeserializedBundle::new(bundle, max_packets_per_bundle) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crossbeam_channel::unbounded, + solana_ledger::genesis_utils::create_genesis_config, + solana_perf::packet::PacketBatch, + solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo}, + solana_sdk::{packet::Packet, signature::Signer, system_transaction::transfer}, + }; + + #[test] + fn test_deserialize_and_collect_bundles_empty() { + let results = + BundlePacketDeserializer::deserialize_and_collect_bundles(0, &mut [], false, Some(5)); + assert_eq!(results.deserialized_bundles.len(), 0); + assert_eq!(results.num_dropped_packets, 0); + assert_eq!(results.num_dropped_bundles, 0); + } + + #[test] + fn test_receive_bundles_capacity() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let (sender, receiver) = unbounded(); + + let deserializer = BundlePacketDeserializer::new(receiver, bank_forks, Some(10)); + + let packet_bundles: Vec<_> = (0..10) + .map(|_| PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data( + None, + transfer( + &mint_keypair, + &mint_keypair.pubkey(), + 100, + genesis_config.hash(), + ), + ) + .unwrap()]), + bundle_id: String::default(), + }) + .collect(); + + sender.send(packet_bundles.clone()).unwrap(); + + let bundles = deserializer + .receive_bundles(Duration::from_millis(100), 5) + .unwrap(); + // capacity is 5, but the 10 bundles arrive in a single batch, so the whole batch is returned + assert_eq!(bundles.deserialized_bundles.len(), 10); + assert_eq!(bundles.num_dropped_bundles, 0); + assert_eq!(bundles.num_dropped_packets, 0); + + // make sure empty + assert_matches!( + deserializer.receive_bundles(Duration::from_millis(100), 5), + Err(RecvTimeoutError::Timeout) + ); + + // send 2x 10 size batches. capacity is 5, but will return 10 since that's the batch size + sender.send(packet_bundles.clone()).unwrap(); + sender.send(packet_bundles).unwrap(); + let bundles = deserializer + .receive_bundles(Duration::from_millis(100), 5) + .unwrap(); + assert_eq!(bundles.deserialized_bundles.len(), 10); + assert_eq!(bundles.num_dropped_bundles, 0); + assert_eq!(bundles.num_dropped_packets, 0); + + let bundles = deserializer + .receive_bundles(Duration::from_millis(100), 5) + .unwrap(); + assert_eq!(bundles.deserialized_bundles.len(), 10); + assert_eq!(bundles.num_dropped_bundles, 0); + assert_eq!(bundles.num_dropped_packets, 0); + + assert_matches!( + deserializer.receive_bundles(Duration::from_millis(100), 5), + Err(RecvTimeoutError::Timeout) + ); + } + + #[test] + fn test_receive_bundles_bad_bundles() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair: _, + .. 
+ } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let (sender, receiver) = unbounded(); + + let deserializer = BundlePacketDeserializer::new(receiver, bank_forks, Some(10)); + + let packet_bundles: Vec<_> = (0..10) + .map(|_| PacketBundle { + batch: PacketBatch::new(vec![]), + bundle_id: String::default(), + }) + .collect(); + sender.send(packet_bundles).unwrap(); + + let bundles = deserializer + .receive_bundles(Duration::from_millis(100), 5) + .unwrap(); + // all 10 bundles arrive in a single batch; each is empty, so every one is dropped + assert_eq!(bundles.deserialized_bundles.len(), 0); + assert_eq!(bundles.num_dropped_bundles, 10); + assert_eq!(bundles.num_dropped_packets, 0); + } +} diff --git a/core/src/bundle_stage/bundle_packet_receiver.rs b/core/src/bundle_stage/bundle_packet_receiver.rs new file mode 100644 index 0000000000..5e65a07705 --- /dev/null +++ b/core/src/bundle_stage/bundle_packet_receiver.rs @@ -0,0 +1,839 @@ +use { + super::BundleStageLoopMetrics, + crate::{ + banking_stage::unprocessed_transaction_storage::UnprocessedTransactionStorage, + bundle_stage::{ + bundle_packet_deserializer::{BundlePacketDeserializer, ReceiveBundleResults}, + bundle_stage_leader_metrics::BundleStageLeaderMetrics, + }, + immutable_deserialized_bundle::ImmutableDeserializedBundle, + packet_bundle::PacketBundle, + }, + crossbeam_channel::{Receiver, RecvTimeoutError}, + solana_measure::{measure::Measure, measure_us}, + solana_runtime::bank_forks::BankForks, + solana_sdk::timing::timestamp, + std::{ + sync::{Arc, RwLock}, + time::Duration, + }, +}; + +pub struct BundleReceiver { + id: u32, + bundle_packet_deserializer: BundlePacketDeserializer, +} + +impl BundleReceiver { + pub fn new( + id: u32, + bundle_packet_receiver: Receiver<Vec<PacketBundle>>, + bank_forks: Arc<RwLock<BankForks>>, + max_packets_per_bundle: Option<usize>, + ) -> Self { + Self { + id, + bundle_packet_deserializer: BundlePacketDeserializer::new( + bundle_packet_receiver, + bank_forks, + max_packets_per_bundle, + ), + } + } + + /// Receive incoming packets, push into unprocessed buffer with packet indexes + pub fn receive_and_buffer_bundles( + &mut self, + unprocessed_bundle_storage: &mut UnprocessedTransactionStorage, + bundle_stage_metrics: &mut BundleStageLoopMetrics, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + ) -> Result<(), RecvTimeoutError> { + let (result, recv_time_us) = measure_us!({ + let recv_timeout = Self::get_receive_timeout(unprocessed_bundle_storage); + let mut recv_and_buffer_measure = Measure::start("recv_and_buffer"); + self.bundle_packet_deserializer + .receive_bundles(recv_timeout, unprocessed_bundle_storage.max_receive_size()) + // Consumes results if Ok, otherwise we keep the Err + .map(|receive_bundle_results| { + self.buffer_bundles( + receive_bundle_results, + unprocessed_bundle_storage, + bundle_stage_metrics, + // tracer_packet_stats, + bundle_stage_leader_metrics, + ); + recv_and_buffer_measure.stop(); + bundle_stage_metrics.increment_receive_and_buffer_bundles_elapsed_us( + recv_and_buffer_measure.as_us(), + ); + }) + }); + + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .increment_receive_and_buffer_packets_us(recv_time_us); + + result + } + + fn get_receive_timeout( + unprocessed_transaction_storage: &UnprocessedTransactionStorage, + ) -> Duration { + // This thread will almost always not wait because the transaction storage will most likely not be empty + if !unprocessed_transaction_storage.is_empty() { + // If there are buffered 
packets, run the equivalent of try_recv to try reading more + // packets. This prevents starving BankingStage::consume_buffered_packets due to + // buffered_packet_batches containing transactions that exceed the cost model for + // the current bank. + Duration::from_millis(0) + } else { + // BundleStage should pick up a working_bank as fast as possible + Duration::from_millis(100) + } + } + + fn buffer_bundles( + &self, + ReceiveBundleResults { + deserialized_bundles, + num_dropped_bundles: _, + num_dropped_packets: _, + }: ReceiveBundleResults, + unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, + bundle_stage_stats: &mut BundleStageLoopMetrics, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + ) { + let bundle_count = deserialized_bundles.len(); + let packet_count: usize = deserialized_bundles.iter().map(|b| b.len()).sum(); + + bundle_stage_stats.increment_num_bundles_received(bundle_count as u64); + bundle_stage_stats.increment_num_packets_received(packet_count as u64); + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .increment_total_new_valid_packets(packet_count as u64); + + debug!( + "@{:?} bundles: {} txs: {} id: {}", + timestamp(), + bundle_count, + packet_count, + self.id + ); + + Self::push_unprocessed( + unprocessed_transaction_storage, + deserialized_bundles, + bundle_stage_leader_metrics, + bundle_stage_stats, + ); + } + + fn push_unprocessed( + unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, + deserialized_bundles: Vec, + bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics, + bundle_stage_stats: &mut BundleStageLoopMetrics, + ) { + if !deserialized_bundles.is_empty() { + let insert_bundles_summary = + unprocessed_transaction_storage.insert_bundles(deserialized_bundles); + + bundle_stage_stats.increment_newly_buffered_bundles_count( + insert_bundles_summary.num_bundles_inserted as u64, + ); + bundle_stage_stats + .increment_num_bundles_dropped(insert_bundles_summary.num_bundles_dropped as u64); + + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .increment_newly_buffered_packets_count( + insert_bundles_summary.num_packets_inserted as u64, + ); + + bundle_stage_leader_metrics + .leader_slot_metrics_tracker() + .accumulate_insert_packet_batches_summary( + &insert_bundles_summary.insert_packets_summary, + ); + } + } +} + +/// This tests functionality of BundlePacketReceiver and the internals of BundleStorage because +/// they're tightly intertwined +#[cfg(test)] +mod tests { + use { + super::*, + crossbeam_channel::unbounded, + rand::{thread_rng, RngCore}, + solana_bundle::{ + bundle_execution::LoadAndExecuteBundleError, BundleExecutionError, TipError, + }, + solana_ledger::genesis_utils::create_genesis_config, + solana_perf::packet::PacketBatch, + solana_poh::poh_recorder::PohRecorderError, + solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo}, + solana_sdk::{ + bundle::{derive_bundle_id, SanitizedBundle}, + hash::Hash, + packet::Packet, + signature::{Keypair, Signer}, + system_transaction::transfer, + transaction::VersionedTransaction, + }, + std::collections::{HashSet, VecDeque}, + }; + + /// Makes `num_bundles` random bundles with `num_packets_per_bundle` packets per bundle. 
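+ /// e.g. (5, 2) yields 5 bundles of 2 single-transfer packets each, with bundle ids derived + /// from the transactions.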
+ fn make_random_bundles( + mint_keypair: &Keypair, + num_bundles: usize, + num_packets_per_bundle: usize, + hash: Hash, + ) -> Vec { + let mut rng = thread_rng(); + + (0..num_bundles) + .map(|_| { + let transfers: Vec<_> = (0..num_packets_per_bundle) + .map(|_| { + VersionedTransaction::from(transfer( + mint_keypair, + &mint_keypair.pubkey(), + rng.next_u64(), + hash, + )) + }) + .collect(); + let bundle_id = derive_bundle_id(&transfers); + + PacketBundle { + batch: PacketBatch::new( + transfers + .iter() + .map(|tx| Packet::from_data(None, tx).unwrap()) + .collect(), + ), + bundle_id, + } + }) + .collect() + } + + fn assert_bundles_same( + packet_bundles: &[PacketBundle], + bundles_to_process: &[(ImmutableDeserializedBundle, SanitizedBundle)], + ) { + assert_eq!(packet_bundles.len(), bundles_to_process.len()); + packet_bundles + .iter() + .zip(bundles_to_process.iter()) + .for_each(|(packet_bundle, (_, sanitized_bundle))| { + assert_eq!(packet_bundle.bundle_id, sanitized_bundle.bundle_id); + assert_eq!( + packet_bundle.batch.len(), + sanitized_bundle.transactions.len() + ); + }); + } + + #[test] + fn test_receive_bundles() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(1_000), + VecDeque::with_capacity(1_000), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + let bundles = make_random_bundles(&mint_keypair, 10, 2, genesis_config.hash()); + sender.send(bundles.clone()).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 10); + assert_eq!(bundle_storage.unprocessed_packets_len(), 20); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0); + assert_eq!(bundle_storage.max_receive_size(), 990); + + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles, bundles_to_process); + (0..bundles_to_process.len()).map(|_| Ok(())).collect() + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + assert_eq!(bundle_storage.unprocessed_packets_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0); + assert_eq!(bundle_storage.max_receive_size(), 1000); + } + + #[test] + fn test_receive_more_bundles_than_capacity() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + let bundles = make_random_bundles(&mint_keypair, 15, 2, genesis_config.hash()); + + sender.send(bundles.clone()).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + // 15 bundles were sent, but the capacity is 10 + assert_eq!(bundle_storage.unprocessed_bundles_len(), 10); + assert_eq!(bundle_storage.unprocessed_packets_len(), 20); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0); + + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + // make sure the first 10 bundles are the ones to process + assert_bundles_same(&bundles[0..10], bundles_to_process); + (0..bundles_to_process.len()).map(|_| Ok(())).collect() + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0); + } + + #[test] + fn test_process_bundles_poh_record_error_rebuffered() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + // send 5 bundles across the queue + let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles.clone()).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let poh_max_height_reached_index = 3; + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + + // make sure poh end of slot reached + the correct bundles are buffered for the next time. 
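+ // a MaxHeightReached error aborts processing at the failing bundle; it and everything after + // it are requeued rather than dropped: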
+ // bundles at index 3 + 4 are rebuffered + assert!(bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles, bundles_to_process); + + let mut results = vec![Ok(()); bundles_to_process.len()]; + + (poh_max_height_reached_index..bundles_to_process.len()).for_each(|index| { + results[index] = Err(BundleExecutionError::PohRecordError( + PohRecorderError::MaxHeightReached, + )); + }); + results + } + )); + + assert_eq!(bundle_storage.unprocessed_bundles_len(), 2); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0); + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles[poh_max_height_reached_index..], bundles_to_process); + vec![Ok(()); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + } + + #[test] + fn test_process_bundles_bank_processing_done_rebuffered() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + // send 5 bundles across the queue + let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles.clone()).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bank_processing_done_index = 3; + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + + // bundles at index 3 + 4 are rebuffered + assert!(bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles, bundles_to_process); + + let mut results = vec![Ok(()); bundles_to_process.len()]; + + (bank_processing_done_index..bundles_to_process.len()).for_each(|index| { + results[index] = Err(BundleExecutionError::BankProcessingTimeLimitReached); + }); + results + } + )); + + // 0, 1, 2 processed; 3, 4 buffered + assert_eq!(bundle_storage.unprocessed_bundles_len(), 2); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0); + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles[bank_processing_done_index..], bundles_to_process); + vec![Ok(()); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + } + + #[test] + fn test_process_bundles_bank_execution_error_dropped() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + // send 5 bundles across the queue + let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles.clone()).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles, bundles_to_process); + vec![ + Err(BundleExecutionError::TransactionFailure( + LoadAndExecuteBundleError::ProcessingTimeExceeded(Duration::from_secs(1)), + )); + bundles_to_process.len() + ] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + } + + #[test] + fn test_process_bundles_tip_error_dropped() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + // send 5 bundles across the queue + let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles.clone()).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles, bundles_to_process); + vec![ + Err(BundleExecutionError::TipError(TipError::LockError)); + bundles_to_process.len() + ] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + } + + #[test] + fn test_process_bundles_lock_error_dropped() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + // send 5 bundles across the queue + let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + vec![Err(BundleExecutionError::LockError); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + } + + #[test] + fn test_process_bundles_cost_model_exceeded_set_aside_and_requeued() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + // send 5 bundles across the queue + let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles.clone()).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + + // buffered bundles are moved to cost model side deque + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles, bundles_to_process); + vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 5); + + // double check there's no bundles to process + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert!(bundles_to_process.is_empty()); + vec![Ok(()); bundles_to_process.len()] + } + )); + + // create a new bank w/ new slot number, cost model buffered packets should move back onto queue + // in the same order they were originally + let bank = bank_forks.read().unwrap().working_bank(); + let new_bank = Arc::new(Bank::new_from_parent( + 
bank.clone(), + bank.collector_id(), + bank.slot() + 1, + )); + assert!(!bundle_storage.process_bundles( + new_bank, + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + // make sure same order as original + assert_bundles_same(&bundles, bundles_to_process); + vec![Ok(()); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0); + } + + #[test] + fn test_process_bundles_cost_model_exceeded_buffer_capacity() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank_forks = + BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage( + VecDeque::with_capacity(10), + VecDeque::with_capacity(10), + ); + + let (sender, receiver) = unbounded(); + let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5)); + + // send 15 bundles across the queue + let bundles0 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles0.clone()).unwrap(); + + let mut bundle_stage_stats = BundleStageLoopMetrics::default(); + let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0); + + // receive and buffer bundles to the cost model reserve to test the capacity/dropped bundles there + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + // buffered bundles are moved to cost model side deque + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles0, bundles_to_process); + vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 5); + + let bundles1 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles1.clone()).unwrap(); + // should get 5 more bundles + cost model buffered length should be 10 + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + // buffered bundles are moved to cost model side deque + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles1, bundles_to_process); + vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 10); + + let bundles2 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash()); + sender.send(bundles2.clone()).unwrap(); + + // this set will get dropped from cost model buffered bundles + let result = bundle_receiver.receive_and_buffer_bundles( + &mut unprocessed_storage, + &mut bundle_stage_stats, + &mut bundle_stage_leader_metrics, + ); + 
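// the cost model side deque already holds 10 bundles (its capacity), so this batch will be + // dropped once it exceeds the cost model below + 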
assert!(result.is_ok()); + + let bundle_storage = unprocessed_storage.bundle_storage().unwrap(); + // buffered bundles are moved to cost model side deque, but it's at capacity so it stays the same size + assert!(!bundle_storage.process_bundles( + bank_forks.read().unwrap().working_bank(), + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + assert_bundles_same(&bundles2, bundles_to_process); + vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 10); + + // create new bank then call process_bundles again, expect to see [bundles0, bundles1] since bundles2 was dropped + let bank = bank_forks.read().unwrap().working_bank(); + let new_bank = Arc::new(Bank::new_from_parent( + bank.clone(), + bank.collector_id(), + bank.slot() + 1, + )); + assert!(!bundle_storage.process_bundles( + new_bank, + &mut bundle_stage_leader_metrics, + &HashSet::default(), + |bundles_to_process, _stats| { + // make sure same order as original + let expected_bundles: Vec<_> = + bundles0.iter().chain(bundles1.iter()).cloned().collect(); + assert_bundles_same(&expected_bundles, bundles_to_process); + vec![Ok(()); bundles_to_process.len()] + } + )); + assert_eq!(bundle_storage.unprocessed_bundles_len(), 0); + assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0); + } +} diff --git a/core/src/bundle_stage/bundle_reserved_space_manager.rs b/core/src/bundle_stage/bundle_reserved_space_manager.rs new file mode 100644 index 0000000000..a6b858d3b2 --- /dev/null +++ b/core/src/bundle_stage/bundle_reserved_space_manager.rs @@ -0,0 +1,239 @@ +use {solana_runtime::bank::Bank, solana_sdk::clock::Slot, std::sync::Arc}; + +/// Manager responsible for reserving `bundle_reserved_cost` during the first `reserved_ticks` of a bank +/// and resetting the block cost limit to `block_cost_limit` after the reserved tick period is over +pub struct BundleReservedSpaceManager { + // the bank's cost limit + block_cost_limit: u64, + // bundles get this much reserved space for the first reserved_ticks + bundle_reserved_cost: u64, + // a reduced block_compute_limit is reserved for this many ticks, afterwards it goes back to full cost + reserved_ticks: u64, + last_slot_updated: Slot, +} + +impl BundleReservedSpaceManager { + pub fn new(block_cost_limit: u64, bundle_reserved_cost: u64, reserved_ticks: u64) -> Self { + Self { + block_cost_limit, + bundle_reserved_cost, + reserved_ticks, + last_slot_updated: u64::MAX, + } + } + + /// Call this on creation of a new bank and periodically during bundle processing + /// to manage the block_cost_limits + pub fn tick(&mut self, bank: &Arc<Bank>) { + if self.last_slot_updated == bank.slot() && !self.is_in_reserved_tick_period(bank) { + // new slot logic already ran, need to revert the block cost limit to original if + // ticks are past the reserved tick mark + debug!( + "slot: {} ticks: {}, resetting block_cost_limit to {}", + bank.slot(), + bank.tick_height(), + self.block_cost_limit + ); + bank.write_cost_tracker() + .unwrap() + .set_block_cost_limit(self.block_cost_limit); + } else if self.last_slot_updated != bank.slot() && self.is_in_reserved_tick_period(bank) { + // new slot, if in the first max_tick - tick_height slots reserve space + // otherwise can leave the current block limit as is + let new_block_cost_limit = self.reduced_block_cost_limit(); + debug!( + "slot: {} ticks: {}, reserving block_cost_limit with block_cost_limit of {}", + 
bank.slot(), + bank.tick_height(), + new_block_cost_limit + ); + bank.write_cost_tracker() + .unwrap() + .set_block_cost_limit(new_block_cost_limit); + self.last_slot_updated = bank.slot(); + } + } + + /// return true if the bank is still in the period where block_cost_limits is reduced + pub fn is_in_reserved_tick_period(&self, bank: &Bank) -> bool { + bank.tick_height() % bank.ticks_per_slot() < self.reserved_ticks + } + + /// return the block_cost_limits as determined by the tick height of the bank + pub fn expected_block_cost_limits(&self, bank: &Bank) -> u64 { + if self.is_in_reserved_tick_period(bank) { + self.reduced_block_cost_limit() + } else { + self.block_cost_limit() + } + } + + pub fn reduced_block_cost_limit(&self) -> u64 { + self.block_cost_limit + .saturating_sub(self.bundle_reserved_cost) + } + + pub fn block_cost_limit(&self) -> u64 { + self.block_cost_limit + } +} + +#[cfg(test)] +mod tests { + use { + crate::bundle_stage::bundle_reserved_space_manager::BundleReservedSpaceManager, + solana_ledger::genesis_utils::create_genesis_config, + solana_runtime::bank::Bank, + solana_sdk::{hash::Hash, pubkey::Pubkey}, + std::sync::Arc, + }; + + #[test] + fn test_reserve_block_cost_limits_during_reserved_ticks() { + const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100; + + let genesis_config_info = create_genesis_config(100); + let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); + + let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit(); + + let mut reserved_space = BundleReservedSpaceManager::new( + block_cost_limits, + BUNDLE_BLOCK_COST_LIMITS_RESERVATION, + 5, + ); + reserved_space.tick(&bank); + + assert_eq!( + bank.read_cost_tracker().unwrap().block_cost_limit(), + block_cost_limits - BUNDLE_BLOCK_COST_LIMITS_RESERVATION + ); + } + + #[test] + fn test_dont_reserve_block_cost_limits_after_reserved_ticks() { + const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100; + + let genesis_config_info = create_genesis_config(100); + let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); + + let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit(); + + for _ in 0..5 { + bank.register_tick(&Hash::default()); + } + + let mut reserved_space = BundleReservedSpaceManager::new( + block_cost_limits, + BUNDLE_BLOCK_COST_LIMITS_RESERVATION, + 5, + ); + reserved_space.tick(&bank); + + assert_eq!( + bank.read_cost_tracker().unwrap().block_cost_limit(), + block_cost_limits + ); + } + + #[test] + fn test_dont_reset_block_cost_limits_during_reserved_ticks() { + const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100; + + let genesis_config_info = create_genesis_config(100); + let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); + + let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit(); + + let mut reserved_space = BundleReservedSpaceManager::new( + block_cost_limits, + BUNDLE_BLOCK_COST_LIMITS_RESERVATION, + 5, + ); + + reserved_space.tick(&bank); + bank.register_tick(&Hash::default()); + reserved_space.tick(&bank); + + assert_eq!( + bank.read_cost_tracker().unwrap().block_cost_limit(), + block_cost_limits - BUNDLE_BLOCK_COST_LIMITS_RESERVATION + ); + } + + #[test] + fn test_reset_block_cost_limits_after_reserved_ticks() { + const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100; + + let genesis_config_info = create_genesis_config(100); + let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); + + let block_cost_limits = 
bank.read_cost_tracker().unwrap().block_cost_limit(); + + let mut reserved_space = BundleReservedSpaceManager::new( + block_cost_limits, + BUNDLE_BLOCK_COST_LIMITS_RESERVATION, + 5, + ); + + reserved_space.tick(&bank); + + for _ in 0..5 { + bank.register_tick(&Hash::default()); + } + reserved_space.tick(&bank); + + assert_eq!( + bank.read_cost_tracker().unwrap().block_cost_limit(), + block_cost_limits + ); + } + + #[test] + fn test_block_limits_after_first_slot() { + const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100; + const RESERVED_TICKS: u64 = 5; + let genesis_config_info = create_genesis_config(100); + let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); + + for _ in 0..genesis_config_info.genesis_config.ticks_per_slot { + bank.register_tick(&Hash::default()); + } + assert!(bank.is_complete()); + bank.freeze(); + assert_eq!( + bank.read_cost_tracker().unwrap().block_cost_limit(), + solana_cost_model::block_cost_limits::MAX_BLOCK_UNITS, + ); + + let bank1 = Arc::new(Bank::new_from_parent(bank.clone(), &Pubkey::default(), 1)); + assert_eq!(bank1.slot(), 1); + assert_eq!(bank1.tick_height(), 64); + assert_eq!(bank1.max_tick_height(), 128); + + // reserve space + let block_cost_limits = bank1.read_cost_tracker().unwrap().block_cost_limit(); + let mut reserved_space = BundleReservedSpaceManager::new( + block_cost_limits, + BUNDLE_BLOCK_COST_LIMITS_RESERVATION, + RESERVED_TICKS, + ); + reserved_space.tick(&bank1); + + // wait for reservation to be over + (0..RESERVED_TICKS).for_each(|_| { + bank1.register_tick(&Hash::default()); + assert_eq!( + bank1.read_cost_tracker().unwrap().block_cost_limit(), + block_cost_limits - BUNDLE_BLOCK_COST_LIMITS_RESERVATION + ); + }); + reserved_space.tick(&bank1); + + // after reservation, revert back to normal limit + assert_eq!( + bank1.read_cost_tracker().unwrap().block_cost_limit(), + solana_cost_model::block_cost_limits::MAX_BLOCK_UNITS, + ); + } +} diff --git a/core/src/bundle_stage/bundle_stage_leader_metrics.rs b/core/src/bundle_stage/bundle_stage_leader_metrics.rs new file mode 100644 index 0000000000..52c1aa0714 --- /dev/null +++ b/core/src/bundle_stage/bundle_stage_leader_metrics.rs @@ -0,0 +1,502 @@ +use { + crate::{ + banking_stage::{leader_slot_metrics, leader_slot_metrics::LeaderSlotMetricsTracker}, + immutable_deserialized_bundle::DeserializedBundleError, + }, + solana_bundle::{bundle_execution::LoadAndExecuteBundleError, BundleExecutionError}, + solana_poh::poh_recorder::BankStart, + solana_sdk::{bundle::SanitizedBundle, clock::Slot, saturating_add_assign}, +}; + +pub struct BundleStageLeaderMetrics { + bundle_stage_metrics_tracker: BundleStageStatsMetricsTracker, + leader_slot_metrics_tracker: LeaderSlotMetricsTracker, +} + +pub(crate) enum MetricsTrackerAction { + Noop, + ReportAndResetTracker, + NewTracker(Option<BundleStageStats>), + ReportAndNewTracker(Option<BundleStageStats>), +} + +impl BundleStageLeaderMetrics { + pub fn new(id: u32) -> Self { + Self { + bundle_stage_metrics_tracker: BundleStageStatsMetricsTracker::new(id), + leader_slot_metrics_tracker: LeaderSlotMetricsTracker::new(id), + } + } + + pub(crate) fn check_leader_slot_boundary( + &mut self, + bank_start: Option<&BankStart>, + ) -> ( + leader_slot_metrics::MetricsTrackerAction, + MetricsTrackerAction, + ) { + let banking_stage_metrics_action = self + .leader_slot_metrics_tracker + .check_leader_slot_boundary(bank_start); + let bundle_stage_metrics_action = self + .bundle_stage_metrics_tracker + .check_leader_slot_boundary(bank_start); + (banking_stage_metrics_action, 
bundle_stage_metrics_action) + } + + pub(crate) fn apply_action( + &mut self, + banking_stage_metrics_action: leader_slot_metrics::MetricsTrackerAction, + bundle_stage_metrics_action: MetricsTrackerAction, + ) -> Option<Slot> { + self.leader_slot_metrics_tracker + .apply_action(banking_stage_metrics_action); + self.bundle_stage_metrics_tracker + .apply_action(bundle_stage_metrics_action) + } + + pub fn leader_slot_metrics_tracker(&mut self) -> &mut LeaderSlotMetricsTracker { + &mut self.leader_slot_metrics_tracker + } + + pub fn bundle_stage_metrics_tracker(&mut self) -> &mut BundleStageStatsMetricsTracker { + &mut self.bundle_stage_metrics_tracker + } +} + +pub struct BundleStageStatsMetricsTracker { + bundle_stage_metrics: Option<BundleStageStats>, + id: u32, +} + +impl BundleStageStatsMetricsTracker { + pub fn new(id: u32) -> Self { + Self { + bundle_stage_metrics: None, + id, + } + } + + /// Similar to LeaderSlotMetricsTracker::check_leader_slot_boundary + pub(crate) fn check_leader_slot_boundary( + &mut self, + bank_start: Option<&BankStart>, + ) -> MetricsTrackerAction { + match (self.bundle_stage_metrics.as_mut(), bank_start) { + (None, None) => MetricsTrackerAction::Noop, + (Some(_), None) => MetricsTrackerAction::ReportAndResetTracker, + // Our leader slot has begun, time to create a new slot tracker + (None, Some(bank_start)) => MetricsTrackerAction::NewTracker(Some( + BundleStageStats::new(self.id, bank_start.working_bank.slot()), + )), + (Some(bundle_stage_metrics), Some(bank_start)) => { + if bundle_stage_metrics.slot != bank_start.working_bank.slot() { + // Last slot has ended, new slot has begun + MetricsTrackerAction::ReportAndNewTracker(Some(BundleStageStats::new( + self.id, + bank_start.working_bank.slot(), + ))) + } else { + MetricsTrackerAction::Noop + } + } + } + } + + /// Similar to LeaderSlotMetricsTracker::apply_action + pub(crate) fn apply_action(&mut self, action: MetricsTrackerAction) -> Option<Slot> { + match action { + MetricsTrackerAction::Noop => None, + MetricsTrackerAction::ReportAndResetTracker => { + let mut reported_slot = None; + if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() { + bundle_stage_metrics.report(); + reported_slot = bundle_stage_metrics.reported_slot(); + } + self.bundle_stage_metrics = None; + reported_slot + } + MetricsTrackerAction::NewTracker(new_bundle_stage_metrics) => { + self.bundle_stage_metrics = new_bundle_stage_metrics; + self.bundle_stage_metrics.as_ref().unwrap().reported_slot() + } + MetricsTrackerAction::ReportAndNewTracker(new_bundle_stage_metrics) => { + let mut reported_slot = None; + if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() { + bundle_stage_metrics.report(); + reported_slot = bundle_stage_metrics.reported_slot(); + } + self.bundle_stage_metrics = new_bundle_stage_metrics; + reported_slot + } + } + } + + pub(crate) fn increment_sanitize_transaction_result( + &mut self, + result: &Result<SanitizedBundle, DeserializedBundleError>, + ) { + if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() { + match result { + Ok(_) => { + saturating_add_assign!(bundle_stage_metrics.sanitize_transaction_ok, 1); + } + Err(e) => match e { + DeserializedBundleError::VoteOnlyMode => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_vote_only_mode, + 1 + ); + } + DeserializedBundleError::BlacklistedAccount => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_blacklisted_account, + 1 + ); + } + DeserializedBundleError::FailedToSerializeTransaction => { + saturating_add_assign!(
bundle_stage_metrics.sanitize_transaction_failed_to_serialize, + 1 + ); + } + DeserializedBundleError::DuplicateTransaction => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_duplicate_transaction, + 1 + ); + } + DeserializedBundleError::FailedCheckTransactions => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_failed_check, + 1 + ); + } + DeserializedBundleError::FailedToSerializePacket => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_failed_to_serialize, + 1 + ); + } + DeserializedBundleError::EmptyBatch => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_failed_empty_batch, + 1 + ); + } + DeserializedBundleError::TooManyPackets => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_failed_too_many_packets, + 1 + ); + } + DeserializedBundleError::MarkedDiscard => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_failed_marked_discard, + 1 + ); + } + DeserializedBundleError::SignatureVerificationFailure => { + saturating_add_assign!( + bundle_stage_metrics.sanitize_transaction_failed_sig_verify_failed, + 1 + ); + } + }, + } + } + } + + pub fn increment_bundle_execution_result(&mut self, result: &Result<(), BundleExecutionError>) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + match result { + Ok(_) => { + saturating_add_assign!(bundle_stage_metrics.execution_results_ok, 1); + } + Err(BundleExecutionError::PohRecordError(_)) + | Err(BundleExecutionError::BankProcessingTimeLimitReached) => { + saturating_add_assign!( + bundle_stage_metrics.execution_results_poh_max_height, + 1 + ); + } + Err(BundleExecutionError::TransactionFailure( + LoadAndExecuteBundleError::ProcessingTimeExceeded(_), + )) => { + saturating_add_assign!(bundle_stage_metrics.num_execution_timeouts, 1); + } + Err(BundleExecutionError::TransactionFailure( + LoadAndExecuteBundleError::TransactionError { .. }, + )) => { + saturating_add_assign!( + bundle_stage_metrics.execution_results_transaction_failures, + 1 + ); + } + Err(BundleExecutionError::TransactionFailure( + LoadAndExecuteBundleError::LockError { .. 
}, + )) + | Err(BundleExecutionError::LockError) => { + saturating_add_assign!(bundle_stage_metrics.num_lock_errors, 1); + } + Err(BundleExecutionError::ExceedsCostModel) => { + saturating_add_assign!( + bundle_stage_metrics.execution_results_exceeds_cost_model, + 1 + ); + } + Err(BundleExecutionError::TipError(_)) => { + saturating_add_assign!(bundle_stage_metrics.execution_results_tip_errors, 1); + } + Err(BundleExecutionError::TransactionFailure( + LoadAndExecuteBundleError::InvalidPreOrPostAccounts, + )) => { + saturating_add_assign!(bundle_stage_metrics.bad_argument, 1); + } + } + } + } + + pub(crate) fn increment_sanitize_bundle_elapsed_us(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!(bundle_stage_metrics.sanitize_bundle_elapsed_us, count); + } + } + + pub(crate) fn increment_locked_bundle_elapsed_us(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!(bundle_stage_metrics.locked_bundle_elapsed_us, count); + } + } + + pub(crate) fn increment_num_init_tip_account_errors(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!(bundle_stage_metrics.num_init_tip_account_errors, count); + } + } + + pub(crate) fn increment_num_init_tip_account_ok(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!(bundle_stage_metrics.num_init_tip_account_ok, count); + } + } + + pub(crate) fn increment_num_change_tip_receiver_errors(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!(bundle_stage_metrics.num_change_tip_receiver_errors, count); + } + } + + pub(crate) fn increment_num_change_tip_receiver_ok(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!(bundle_stage_metrics.num_change_tip_receiver_ok, count); + } + } + + pub(crate) fn increment_change_tip_receiver_elapsed_us(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!(bundle_stage_metrics.change_tip_receiver_elapsed_us, count); + } + } + + pub(crate) fn increment_num_execution_retries(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!(bundle_stage_metrics.num_execution_retries, count); + } + } + + pub(crate) fn increment_execute_locked_bundles_elapsed_us(&mut self, count: u64) { + if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics { + saturating_add_assign!( + bundle_stage_metrics.execute_locked_bundles_elapsed_us, + count + ); + } + } +} + +#[derive(Default)] +pub struct BundleStageStats { + id: u32, + slot: u64, + is_reported: bool, + + sanitize_transaction_ok: u64, + sanitize_transaction_vote_only_mode: u64, + sanitize_transaction_blacklisted_account: u64, + sanitize_transaction_failed_to_serialize: u64, + sanitize_transaction_duplicate_transaction: u64, + sanitize_transaction_failed_check: u64, + sanitize_bundle_elapsed_us: u64, + sanitize_transaction_failed_empty_batch: u64, + sanitize_transaction_failed_too_many_packets: u64, + sanitize_transaction_failed_marked_discard: u64, + sanitize_transaction_failed_sig_verify_failed: u64, + + locked_bundle_elapsed_us: u64, + + num_lock_errors: u64, + + num_init_tip_account_errors: u64, + num_init_tip_account_ok: u64, + + 
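+ // Metrics below track changing the tip receiver (see tip_manager): success/error counts and elapsed time. +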
num_change_tip_receiver_errors: u64, + num_change_tip_receiver_ok: u64, + change_tip_receiver_elapsed_us: u64, + + num_execution_timeouts: u64, + num_execution_retries: u64, + + execute_locked_bundles_elapsed_us: u64, + + execution_results_ok: u64, + execution_results_poh_max_height: u64, + execution_results_transaction_failures: u64, + execution_results_exceeds_cost_model: u64, + execution_results_tip_errors: u64, + execution_results_max_retries: u64, + + bad_argument: u64, +} + +impl BundleStageStats { + pub fn new(id: u32, slot: Slot) -> BundleStageStats { + BundleStageStats { + id, + slot, + is_reported: false, + ..BundleStageStats::default() + } + } + + /// Returns `Some(self.slot)` if the metrics have been reported, otherwise returns None + fn reported_slot(&self) -> Option { + if self.is_reported { + Some(self.slot) + } else { + None + } + } + + pub fn report(&mut self) { + self.is_reported = true; + + datapoint_info!( + "bundle_stage-stats", + ("id", self.id, i64), + ("slot", self.slot, i64), + ("num_sanitized_ok", self.sanitize_transaction_ok, i64), + ( + "sanitize_transaction_vote_only_mode", + self.sanitize_transaction_vote_only_mode, + i64 + ), + ( + "sanitize_transaction_blacklisted_account", + self.sanitize_transaction_blacklisted_account, + i64 + ), + ( + "sanitize_transaction_failed_to_serialize", + self.sanitize_transaction_failed_to_serialize, + i64 + ), + ( + "sanitize_transaction_duplicate_transaction", + self.sanitize_transaction_duplicate_transaction, + i64 + ), + ( + "sanitize_transaction_failed_check", + self.sanitize_transaction_failed_check, + i64 + ), + ( + "sanitize_bundle_elapsed_us", + self.sanitize_bundle_elapsed_us, + i64 + ), + ( + "sanitize_transaction_failed_empty_batch", + self.sanitize_transaction_failed_empty_batch, + i64 + ), + ( + "sanitize_transaction_failed_too_many_packets", + self.sanitize_transaction_failed_too_many_packets, + i64 + ), + ( + "sanitize_transaction_failed_marked_discard", + self.sanitize_transaction_failed_marked_discard, + i64 + ), + ( + "sanitize_transaction_failed_sig_verify_failed", + self.sanitize_transaction_failed_sig_verify_failed, + i64 + ), + ( + "locked_bundle_elapsed_us", + self.locked_bundle_elapsed_us, + i64 + ), + ("num_lock_errors", self.num_lock_errors, i64), + ( + "num_init_tip_account_errors", + self.num_init_tip_account_errors, + i64 + ), + ("num_init_tip_account_ok", self.num_init_tip_account_ok, i64), + ( + "num_change_tip_receiver_errors", + self.num_change_tip_receiver_errors, + i64 + ), + ( + "num_change_tip_receiver_ok", + self.num_change_tip_receiver_ok, + i64 + ), + ( + "change_tip_receiver_elapsed_us", + self.change_tip_receiver_elapsed_us, + i64 + ), + ("num_execution_timeouts", self.num_execution_timeouts, i64), + ("num_execution_retries", self.num_execution_retries, i64), + ( + "execute_locked_bundles_elapsed_us", + self.execute_locked_bundles_elapsed_us, + i64 + ), + ("execution_results_ok", self.execution_results_ok, i64), + ( + "execution_results_poh_max_height", + self.execution_results_poh_max_height, + i64 + ), + ( + "execution_results_transaction_failures", + self.execution_results_transaction_failures, + i64 + ), + ( + "execution_results_exceeds_cost_model", + self.execution_results_exceeds_cost_model, + i64 + ), + ( + "execution_results_tip_errors", + self.execution_results_tip_errors, + i64 + ), + ( + "execution_results_max_retries", + self.execution_results_max_retries, + i64 + ), + ("bad_argument", self.bad_argument, i64) + ); + } +} diff --git a/core/src/bundle_stage/committer.rs 
b/core/src/bundle_stage/committer.rs new file mode 100644 index 0000000000..5bdf0c0b5a --- /dev/null +++ b/core/src/bundle_stage/committer.rs @@ -0,0 +1,218 @@ +use { + crate::banking_stage::{ + committer::CommitTransactionDetails, + leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, + }, + solana_accounts_db::transaction_results::TransactionResults, + solana_bundle::bundle_execution::LoadAndExecuteBundleOutput, + solana_ledger::blockstore_processor::TransactionStatusSender, + solana_measure::measure_us, + solana_runtime::{ + bank::{Bank, CommitTransactionCounts, TransactionBalances, TransactionBalancesSet}, + bank_utils, + prioritization_fee_cache::PrioritizationFeeCache, + }, + solana_sdk::{hash::Hash, saturating_add_assign, transaction::SanitizedTransaction}, + solana_transaction_status::{ + token_balances::{TransactionTokenBalances, TransactionTokenBalancesSet}, + PreBalanceInfo, + }, + solana_vote::vote_sender_types::ReplayVoteSender, + std::sync::Arc, +}; + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct CommitBundleDetails { + pub commit_transaction_details: Vec<Vec<CommitTransactionDetails>>, +} + +pub struct Committer { + transaction_status_sender: Option<TransactionStatusSender>, + replay_vote_sender: ReplayVoteSender, + prioritization_fee_cache: Arc<PrioritizationFeeCache>, +} + +impl Committer { + pub fn new( + transaction_status_sender: Option<TransactionStatusSender>, + replay_vote_sender: ReplayVoteSender, + prioritization_fee_cache: Arc<PrioritizationFeeCache>, + ) -> Self { + Self { + transaction_status_sender, + replay_vote_sender, + prioritization_fee_cache, + } + } + + pub(crate) fn transaction_status_sender_enabled(&self) -> bool { + self.transaction_status_sender.is_some() + } + + /// Very similar to Committer::commit_transactions, but works with bundles. + /// The main difference is that there are multiple non-parallelizable transaction vectors to commit, + /// and post-balances are collected after execution instead of from the bank in Self::collect_balances_and_send_status_batch.
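+ /// + /// A rough call-site sketch (variable names are illustrative, not part of this diff): + /// ```ignore + /// let (commit_us, commit_bundle_details) = committer.commit_bundle( + /// &mut bundle_execution_output, + /// last_blockhash, + /// lamports_per_signature, + /// Some(starting_transaction_index), + /// &bank, + /// &mut execute_and_commit_timings, + /// ); + /// ```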
+ #[allow(clippy::too_many_arguments)] + pub(crate) fn commit_bundle<'a>( + &self, + bundle_execution_output: &'a mut LoadAndExecuteBundleOutput<'a>, + last_blockhash: Hash, + lamports_per_signature: u64, + mut starting_transaction_index: Option, + bank: &Arc, + execute_and_commit_timings: &mut LeaderExecuteAndCommitTimings, + ) -> (u64, CommitBundleDetails) { + let transaction_output = bundle_execution_output.bundle_transaction_results_mut(); + + let (commit_transaction_details, commit_times): (Vec<_>, Vec<_>) = transaction_output + .iter_mut() + .map(|bundle_results| { + let committed_transactions_count = bundle_results + .load_and_execute_transactions_output() + .executed_transactions_count + as u64; + + let committed_non_vote_transactions_count = bundle_results + .load_and_execute_transactions_output() + .executed_non_vote_transactions_count + as u64; + + let committed_with_failure_result_count = bundle_results + .load_and_execute_transactions_output() + .executed_transactions_count + .saturating_sub( + bundle_results + .load_and_execute_transactions_output() + .executed_with_successful_result_count, + ) as u64; + + let signature_count = bundle_results + .load_and_execute_transactions_output() + .signature_count; + + let sanitized_transactions = bundle_results.transactions().to_vec(); + let execution_results = bundle_results.execution_results().to_vec(); + + let loaded_transactions = bundle_results.loaded_transactions_mut(); + debug!("loaded_transactions: {:?}", loaded_transactions); + + let (tx_results, commit_time_us) = measure_us!(bank.commit_transactions( + &sanitized_transactions, + loaded_transactions, + execution_results, + last_blockhash, + lamports_per_signature, + CommitTransactionCounts { + committed_transactions_count, + committed_non_vote_transactions_count, + committed_with_failure_result_count, + signature_count, + }, + &mut execute_and_commit_timings.execute_timings, + )); + + let commit_transaction_statuses: Vec<_> = tx_results + .execution_results + .iter() + .map(|execution_result| match execution_result.details() { + Some(details) => CommitTransactionDetails::Committed { + compute_units: details.executed_units, + }, + None => CommitTransactionDetails::NotCommitted, + }) + .collect(); + + let ((), find_and_send_votes_us) = measure_us!({ + bank_utils::find_and_send_votes( + &sanitized_transactions, + &tx_results, + Some(&self.replay_vote_sender), + ); + + let post_balance_info = bundle_results.post_balance_info().clone(); + let pre_balance_info = bundle_results.pre_balance_info(); + + let num_committed = tx_results + .execution_results + .iter() + .filter(|r| r.was_executed()) + .count(); + + self.collect_balances_and_send_status_batch( + tx_results, + bank, + sanitized_transactions, + pre_balance_info, + post_balance_info, + starting_transaction_index, + ); + + // NOTE: we're doing batched records, so we need to increment the poh starting_transaction_index + // by number committed so the next batch will have the correct starting_transaction_index + starting_transaction_index = + starting_transaction_index.map(|starting_transaction_index| { + starting_transaction_index.saturating_add(num_committed) + }); + + self.prioritization_fee_cache + .update(bank, bundle_results.executed_transactions().into_iter()); + }); + saturating_add_assign!( + execute_and_commit_timings.find_and_send_votes_us, + find_and_send_votes_us + ); + + (commit_transaction_statuses, commit_time_us) + }) + .unzip(); + + ( + commit_times.iter().sum(), + CommitBundleDetails { + 
commit_transaction_details, + }, + ) + } + + fn collect_balances_and_send_status_batch( + &self, + tx_results: TransactionResults, + bank: &Arc<Bank>, + sanitized_transactions: Vec<SanitizedTransaction>, + pre_balance_info: &mut PreBalanceInfo, + (post_balances, post_token_balances): (TransactionBalances, TransactionTokenBalances), + starting_transaction_index: Option<usize>, + ) { + if let Some(transaction_status_sender) = &self.transaction_status_sender { + let mut transaction_index = starting_transaction_index.unwrap_or_default(); + let batch_transaction_indexes: Vec<_> = tx_results + .execution_results + .iter() + .map(|result| { + if result.was_executed() { + let this_transaction_index = transaction_index; + saturating_add_assign!(transaction_index, 1); + this_transaction_index + } else { + 0 + } + }) + .collect(); + transaction_status_sender.send_transaction_status_batch( + bank.clone(), + sanitized_transactions, + tx_results.execution_results, + TransactionBalancesSet::new( + std::mem::take(&mut pre_balance_info.native), + post_balances, + ), + TransactionTokenBalancesSet::new( + std::mem::take(&mut pre_balance_info.token), + post_token_balances, + ), + tx_results.rent_debits, + batch_transaction_indexes, + ); + } + } +} diff --git a/core/src/bundle_stage/result.rs b/core/src/bundle_stage/result.rs new file mode 100644 index 0000000000..3370251791 --- /dev/null +++ b/core/src/bundle_stage/result.rs @@ -0,0 +1,41 @@ +use { + crate::{ + bundle_stage::bundle_account_locker::BundleAccountLockerError, tip_manager::TipPaymentError, + }, + anchor_lang::error::Error, + solana_bundle::bundle_execution::LoadAndExecuteBundleError, + solana_poh::poh_recorder::PohRecorderError, + thiserror::Error, +}; + +pub type BundleExecutionResult<T> = Result<T, BundleExecutionError>; + +#[derive(Error, Debug, Clone)] +pub enum BundleExecutionError { + #[error("PoH record error: {0}")] + PohRecordError(#[from] PohRecorderError), + + #[error("Bank is done processing")] + BankProcessingDone, + + #[error("Execution error: {0}")] + ExecutionError(#[from] LoadAndExecuteBundleError), + + #[error("The bundle exceeds the cost model")] + ExceedsCostModel, + + #[error("Tip error: {0}")] + TipError(#[from] TipPaymentError), + + #[error("Error locking bundle")] + LockError(#[from] BundleAccountLockerError), +} + +impl From<Error> for TipPaymentError { + fn from(anchor_err: Error) -> Self { + match anchor_err { + Error::AnchorError(e) => Self::AnchorError(e.error_msg), + Error::ProgramError(e) => Self::AnchorError(e.to_string()), + } + } +} diff --git a/core/src/consensus_cache_updater.rs b/core/src/consensus_cache_updater.rs new file mode 100644 index 0000000000..e1dc137ba0 --- /dev/null +++ b/core/src/consensus_cache_updater.rs @@ -0,0 +1,52 @@ +use { + solana_runtime::bank::Bank, + solana_sdk::{clock::Epoch, pubkey::Pubkey}, + std::collections::HashSet, +}; + +#[derive(Default)] +pub(crate) struct ConsensusCacheUpdater { + last_epoch_updated: Epoch, + consensus_accounts_cache: HashSet<Pubkey>, +} + +impl ConsensusCacheUpdater { + pub(crate) fn consensus_accounts_cache(&self) -> &HashSet<Pubkey> { + &self.consensus_accounts_cache + } + + /// Builds a HashSet of all consensus-related accounts for the Bank's epoch + fn get_consensus_accounts(bank: &Bank) -> HashSet<Pubkey> { + let mut consensus_accounts: HashSet<Pubkey> = HashSet::new(); + if let Some(epoch_stakes) = bank.epoch_stakes(bank.epoch()) { + // votes use the following accounts: + // - vote_account pubkey: writeable + // - authorized_voter_pubkey: read-only + // - node_keypair pubkey: payer (writeable) + let node_id_vote_accounts =
epoch_stakes.node_id_to_vote_accounts(); + + let vote_accounts = node_id_vote_accounts + .values() + .flat_map(|v| v.vote_accounts.clone()); + + // vote_account + consensus_accounts.extend(vote_accounts); + // authorized_voter_pubkey + consensus_accounts.extend(epoch_stakes.epoch_authorized_voters().keys()); + // node_keypair + consensus_accounts.extend(epoch_stakes.node_id_to_vote_accounts().keys()); + } + consensus_accounts + } + + /// Updates consensus-related accounts on epoch boundaries + pub(crate) fn maybe_update(&mut self, bank: &Bank) -> bool { + if bank.epoch() > self.last_epoch_updated { + self.consensus_accounts_cache = Self::get_consensus_accounts(bank); + self.last_epoch_updated = bank.epoch(); + true + } else { + false + } + } +} diff --git a/core/src/immutable_deserialized_bundle.rs b/core/src/immutable_deserialized_bundle.rs new file mode 100644 index 0000000000..e4f9f8abb3 --- /dev/null +++ b/core/src/immutable_deserialized_bundle.rs @@ -0,0 +1,485 @@ +use { + crate::{ + banking_stage::immutable_deserialized_packet::ImmutableDeserializedPacket, + packet_bundle::PacketBundle, + }, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_perf::sigverify::verify_packet, + solana_runtime::bank::Bank, + solana_sdk::{ + bundle::SanitizedBundle, clock::MAX_PROCESSING_AGE, pubkey::Pubkey, signature::Signature, + transaction::SanitizedTransaction, + }, + std::{ + collections::{hash_map::RandomState, HashSet}, + iter::repeat, + }, + thiserror::Error, +}; + +#[derive(Debug, Error, Eq, PartialEq)] +pub enum DeserializedBundleError { + #[error("FailedToSerializePacket")] + FailedToSerializePacket, + + #[error("EmptyBatch")] + EmptyBatch, + + #[error("TooManyPackets")] + TooManyPackets, + + #[error("MarkedDiscard")] + MarkedDiscard, + + #[error("SignatureVerificationFailure")] + SignatureVerificationFailure, + + #[error("Bank is in vote-only mode")] + VoteOnlyMode, + + #[error("Bundle mentions blacklisted account")] + BlacklistedAccount, + + #[error("Bundle contains a transaction that failed to serialize")] + FailedToSerializeTransaction, + + #[error("Bundle contains a duplicate transaction")] + DuplicateTransaction, + + #[error("Bundle failed check_transactions")] + FailedCheckTransactions, +} + +#[derive(Debug, PartialEq, Eq)] +pub struct ImmutableDeserializedBundle { + bundle_id: String, + packets: Vec<ImmutableDeserializedPacket>, +} + +impl ImmutableDeserializedBundle { + pub fn new( + bundle: &mut PacketBundle, + max_len: Option<usize>, + ) -> Result<Self, DeserializedBundleError> { + // Checks: non-zero packet count, below the length cap, not marked for discard, + // signatures verify, and every packet sanitizes to an ImmutableDeserializedPacket + if bundle.batch.is_empty() { + return Err(DeserializedBundleError::EmptyBatch); + } + if max_len + .map(|max_len| bundle.batch.len() > max_len) + .unwrap_or(false) + { + return Err(DeserializedBundleError::TooManyPackets); + } + if bundle.batch.iter().any(|p| p.meta().discard()) { + return Err(DeserializedBundleError::MarkedDiscard); + } + if bundle.batch.iter_mut().any(|p| !verify_packet(p, false)) { + return Err(DeserializedBundleError::SignatureVerificationFailure); + } + + let immutable_packets: Vec<_> = bundle + .batch + .iter() + .filter_map(|p| ImmutableDeserializedPacket::new(p.clone()).ok()) + .collect(); + + if bundle.batch.len() != immutable_packets.len() { + return Err(DeserializedBundleError::FailedToSerializePacket); + } + + Ok(Self { + bundle_id: bundle.bundle_id.clone(), + packets: immutable_packets, + }) + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) ->
usize { + self.packets.len() + } + + pub fn bundle_id(&self) -> &str { + &self.bundle_id + } + + /// A bundle has the following requirements: + /// - all transactions must be sanitiz-able + /// - no duplicate signatures + /// - must not contain a blacklisted account + /// - can't already be processed or contain a bad blockhash + pub fn build_sanitized_bundle( + &self, + bank: &Bank, + blacklisted_accounts: &HashSet, + transaction_error_metrics: &mut TransactionErrorMetrics, + ) -> Result { + if bank.vote_only_bank() { + return Err(DeserializedBundleError::VoteOnlyMode); + } + + let transactions: Vec = self + .packets + .iter() + .filter_map(|p| { + p.build_sanitized_transaction(&bank.feature_set, bank.vote_only_bank(), bank) + }) + .collect(); + + if self.packets.len() != transactions.len() { + return Err(DeserializedBundleError::FailedToSerializeTransaction); + } + + let unique_signatures: HashSet<&Signature, RandomState> = + HashSet::from_iter(transactions.iter().map(|tx| tx.signature())); + if unique_signatures.len() != transactions.len() { + return Err(DeserializedBundleError::DuplicateTransaction); + } + + let contains_blacklisted_account = transactions.iter().any(|tx| { + tx.message() + .account_keys() + .iter() + .any(|acc| blacklisted_accounts.contains(acc)) + }); + + if contains_blacklisted_account { + return Err(DeserializedBundleError::BlacklistedAccount); + } + + // assume everything locks okay to check for already-processed transaction or expired/invalid blockhash + let lock_results: Vec<_> = repeat(Ok(())).take(transactions.len()).collect(); + let check_results = bank.check_transactions( + &transactions, + &lock_results, + MAX_PROCESSING_AGE, + transaction_error_metrics, + ); + + if check_results.iter().any(|r| r.0.is_err()) { + return Err(DeserializedBundleError::FailedCheckTransactions); + } + + Ok(SanitizedBundle { + transactions, + bundle_id: self.bundle_id.clone(), + }) + } +} + +#[cfg(test)] +mod tests { + use { + crate::{ + immutable_deserialized_bundle::{DeserializedBundleError, ImmutableDeserializedBundle}, + packet_bundle::PacketBundle, + }, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_client::rpc_client::SerializableTransaction, + solana_ledger::genesis_utils::create_genesis_config, + solana_perf::packet::PacketBatch, + solana_runtime::{ + bank::{Bank, NewBankOptions}, + genesis_utils::GenesisConfigInfo, + }, + solana_sdk::{ + hash::Hash, + packet::Packet, + pubkey::Pubkey, + signature::{Keypair, Signer}, + system_transaction::transfer, + }, + std::{collections::HashSet, sync::Arc}, + }; + + /// Happy case + #[test] + fn test_simple_get_sanitized_bundle() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(10_000); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash()); + + let tx1 = transfer(&mint_keypair, &kp.pubkey(), 501, genesis_config.hash()); + + let bundle = ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![ + Packet::from_data(None, &tx0).unwrap(), + Packet::from_data(None, &tx1).unwrap(), + ]), + bundle_id: String::default(), + }, + None, + ) + .unwrap(); + + let mut transaction_errors = TransactionErrorMetrics::default(); + let sanitized_bundle = bundle + .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors) + .unwrap(); + assert_eq!(sanitized_bundle.transactions.len(), 2); + assert_eq!( + sanitized_bundle.transactions[0].signature(), + tx0.get_signature() + ); + assert_eq!( + sanitized_bundle.transactions[1].signature(), + tx1.get_signature() + ); + } + + #[test] + fn test_empty_batch_fails_to_init() { + assert_eq!( + ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![]), + bundle_id: String::default(), + }, + None, + ), + Err(DeserializedBundleError::EmptyBatch) + ); + } + + #[test] + fn test_too_many_packets_fails_to_init() { + let kp = Keypair::new(); + + assert_eq!( + ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new( + (0..10) + .map(|i| { + Packet::from_data( + None, + transfer(&kp, &kp.pubkey(), i, Hash::default()), + ) + .unwrap() + }) + .collect() + ), + bundle_id: String::default(), + }, + Some(5), + ), + Err(DeserializedBundleError::TooManyPackets) + ); + } + + #[test] + fn test_packets_marked_discard_fails_to_init() { + let kp = Keypair::new(); + + let mut packet = + Packet::from_data(None, transfer(&kp, &kp.pubkey(), 100, Hash::default())).unwrap(); + packet.meta_mut().set_discard(true); + + assert_eq!( + ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id: String::default(), + }, + Some(5), + ), + Err(DeserializedBundleError::MarkedDiscard) + ); + } + + #[test] + fn test_bad_signature_fails_to_init() { + let kp0 = Keypair::new(); + let kp1 = Keypair::new(); + + let mut tx0 = transfer(&kp0, &kp0.pubkey(), 100, Hash::default()); + let tx1 = transfer(&kp1, &kp0.pubkey(), 100, Hash::default()); + tx0.signatures = tx1.signatures; + + assert_eq!( + ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]), + bundle_id: String::default(), + }, + None + ), + Err(DeserializedBundleError::SignatureVerificationFailure) + ); + } + + #[test] + fn test_vote_only_bank_fails_to_build() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(10_000); + let parent = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let vote_only_bank = Arc::new(Bank::new_from_parent_with_options( + parent, + &Pubkey::new_unique(), + 1, + NewBankOptions { + vote_only_bank: true, + }, + )); + + let kp = Keypair::new(); + + let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash()); + + let bundle = ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]), + bundle_id: String::default(), + }, + None, + ) + .unwrap(); + + let mut transaction_errors = TransactionErrorMetrics::default(); + assert_matches!( + bundle.build_sanitized_bundle( + &vote_only_bank, + &HashSet::default(), + &mut transaction_errors + ), + Err(DeserializedBundleError::VoteOnlyMode) + ); + } + + #[test] + fn test_duplicate_signature_fails_to_build() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash()); + + let bundle = ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![ + Packet::from_data(None, &tx0).unwrap(), + Packet::from_data(None, &tx0).unwrap(), + ]), + bundle_id: String::default(), + }, + None, + ) + .unwrap(); + + let mut transaction_errors = TransactionErrorMetrics::default(); + assert_matches!( + bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors), + Err(DeserializedBundleError::DuplicateTransaction) + ); + } + + #[test] + fn test_blacklisted_account_fails_to_build() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash()); + + let bundle = ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]), + bundle_id: String::default(), + }, + None, + ) + .unwrap(); + + let mut transaction_errors = TransactionErrorMetrics::default(); + assert_matches!( + bundle.build_sanitized_bundle( + &bank, + &HashSet::from([kp.pubkey()]), + &mut transaction_errors + ), + Err(DeserializedBundleError::BlacklistedAccount) + ); + } + + #[test] + fn test_already_processed_tx_fails_to_build() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash()); + + bank.process_transaction(&tx0).unwrap(); + + let bundle = ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]), + bundle_id: String::default(), + }, + None, + ) + .unwrap(); + + let mut transaction_errors = TransactionErrorMetrics::default(); + assert_matches!( + bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors), + Err(DeserializedBundleError::FailedCheckTransactions) + ); + } + + #[test] + fn test_bad_blockhash_fails_to_build() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(10_000); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, Hash::default()); + + let bundle = ImmutableDeserializedBundle::new( + &mut PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]), + bundle_id: String::default(), + }, + None, + ) + .unwrap(); + + let mut transaction_errors = TransactionErrorMetrics::default(); + assert_matches!( + bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors), + Err(DeserializedBundleError::FailedCheckTransactions) + ); + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs index 99ac98b5d4..79d039d3f1 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -12,22 +12,27 @@ pub mod accounts_hash_verifier; pub mod admin_rpc_post_init; pub mod banking_stage; pub mod banking_trace; +pub mod bundle_stage; pub mod cache_block_meta_service; pub mod cluster_info_vote_listener; pub mod cluster_slots_service; pub mod commitment_service; pub mod completed_data_sets_service; pub mod consensus; +pub mod consensus_cache_updater; pub mod cost_update_service; pub mod drop_bank_service; pub mod fetch_stage; pub mod gen_keys; +pub mod immutable_deserialized_bundle; pub mod ledger_cleanup_service; pub mod ledger_metric_report_service; pub mod next_leader; pub mod optimistic_confirmation_verifier; +pub mod packet_bundle; pub mod poh_timing_report_service; pub mod poh_timing_reporter; +pub mod proxy; pub mod repair; pub mod replay_stage; mod result; @@ -40,6 +45,7 @@ pub mod snapshot_packager_service; pub mod staked_nodes_updater_service; pub mod stats_reporter_service; pub mod system_monitor_service; +pub mod tip_manager; pub mod tpu; mod tpu_entry_notifier; pub mod tracer_packet_stats; @@ -70,3 +76,41 @@ extern crate solana_frozen_abi_macro; #[cfg(test)] #[macro_use] extern crate assert_matches; + +use { + solana_sdk::packet::{Meta, Packet, PacketFlags, PACKET_DATA_SIZE}, + std::{ + cmp::min, + net::{IpAddr, Ipv4Addr}, + }, +}; + +const UNKNOWN_IP: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); + +// NOTE: last profiled at around 180ns +pub fn proto_packet_to_packet(p: jito_protos::proto::packet::Packet) -> Packet { + let mut data = [0; PACKET_DATA_SIZE]; + let copy_len = min(data.len(), p.data.len()); + data[..copy_len].copy_from_slice(&p.data[..copy_len]); + let mut packet = Packet::new(data, Meta::default()); + if let Some(meta) = p.meta { + packet.meta_mut().size = meta.size as usize; + packet.meta_mut().addr = meta.addr.parse().unwrap_or(UNKNOWN_IP); + packet.meta_mut().port = meta.port as u16; + if let Some(flags) = meta.flags { + if flags.simple_vote_tx { + packet.meta_mut().flags.insert(PacketFlags::SIMPLE_VOTE_TX); + } + if flags.forwarded { + packet.meta_mut().flags.insert(PacketFlags::FORWARDED); + } + if flags.tracer_packet { + packet.meta_mut().flags.insert(PacketFlags::TRACER_PACKET); + } + if flags.repair { + packet.meta_mut().flags.insert(PacketFlags::REPAIR); + } + } + } + packet +} diff --git a/core/src/packet_bundle.rs b/core/src/packet_bundle.rs new file mode 100644 index 0000000000..2158f37414 --- /dev/null +++ b/core/src/packet_bundle.rs @@ -0,0 +1,7 @@ +use solana_perf::packet::PacketBatch; + +#[derive(Clone, Debug)] +pub struct PacketBundle { + pub batch: PacketBatch, + pub bundle_id: String, +} diff --git a/core/src/proxy/auth.rs b/core/src/proxy/auth.rs new file mode 100644 index 0000000000..39821e12ef --- /dev/null +++ b/core/src/proxy/auth.rs 
@@ -0,0 +1,185 @@ +use { + crate::proxy::ProxyError, + chrono::Utc, + jito_protos::proto::auth::{ + auth_service_client::AuthServiceClient, GenerateAuthChallengeRequest, + GenerateAuthTokensRequest, RefreshAccessTokenRequest, Role, Token, + }, + solana_gossip::cluster_info::ClusterInfo, + solana_sdk::signature::{Keypair, Signer}, + std::{ + sync::{Arc, Mutex}, + time::Duration, + }, + tokio::time::timeout, + tonic::{service::Interceptor, transport::Channel, Code, Request, Status}, +}; + +/// Interceptor responsible for adding the access token to request headers. +pub(crate) struct AuthInterceptor { + /// The token added to each request header. + access_token: Arc<Mutex<Token>>, +} + +impl AuthInterceptor { + pub(crate) fn new(access_token: Arc<Mutex<Token>>) -> Self { + Self { access_token } + } +} + +impl Interceptor for AuthInterceptor { + fn call(&mut self, mut request: Request<()>) -> Result<Request<()>, Status> { + request.metadata_mut().insert( + "authorization", + format!("Bearer {}", self.access_token.lock().unwrap().value) + .parse() + .unwrap(), + ); + + Ok(request) + } +} + +/// Generates an auth challenge, then generates and returns validated auth tokens. +pub async fn generate_auth_tokens( + auth_service_client: &mut AuthServiceClient<Channel>, + // used to sign challenges + keypair: &Keypair, +) -> crate::proxy::Result<( + Token, /* access_token */ + Token, /* refresh_token */ +)> { + debug!("generate_auth_challenge"); + let challenge_response = auth_service_client + .generate_auth_challenge(GenerateAuthChallengeRequest { + role: Role::Validator as i32, + pubkey: keypair.pubkey().as_ref().to_vec(), + }) + .await + .map_err(|e: Status| { + if e.code() == Code::PermissionDenied { + ProxyError::AuthenticationPermissionDenied + } else { + ProxyError::AuthenticationError(e.to_string()) + } + })?; + + let formatted_challenge = format!( + "{}-{}", + keypair.pubkey(), + challenge_response.into_inner().challenge + ); + + let signed_challenge = keypair + .sign_message(formatted_challenge.as_bytes()) + .as_ref() + .to_vec(); + + debug!( + "formatted_challenge: {} signed_challenge: {:?}", + formatted_challenge, signed_challenge + ); + + debug!("generate_auth_tokens"); + let auth_tokens = auth_service_client + .generate_auth_tokens(GenerateAuthTokensRequest { + challenge: formatted_challenge, + client_pubkey: keypair.pubkey().as_ref().to_vec(), + signed_challenge, + }) + .await + .map_err(|e| ProxyError::AuthenticationError(e.to_string()))?; + + let inner = auth_tokens.into_inner(); + let access_token = get_validated_token(inner.access_token)?; + let refresh_token = get_validated_token(inner.refresh_token)?; + + Ok((access_token, refresh_token)) +} + +/// Tries to refresh the access token, or runs a full re-auth if needed.
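+/// Refresh policy, mirroring the logic below: +/// - refresh token within `refresh_within_s` of expiry => re-run the full challenge flow (returns both tokens) +/// - access token within `refresh_within_s` of expiry => refresh only the access token +/// - otherwise => `(None, None)`; the expiry math saturates, so already-expired tokens also trigger a refresh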
+pub async fn maybe_refresh_auth_tokens( + auth_service_client: &mut AuthServiceClient<Channel>, + access_token: &Arc<Mutex<Token>>, + refresh_token: &Token, + cluster_info: &Arc<ClusterInfo>, + connection_timeout: &Duration, + refresh_within_s: u64, +) -> crate::proxy::Result<( + Option<Token>, // access token + Option<Token>, // refresh token +)> { + let access_token_expiry: u64 = access_token + .lock() + .unwrap() + .expires_at_utc + .as_ref() + .map(|ts| ts.seconds as u64) + .unwrap_or_default(); + let refresh_token_expiry: u64 = refresh_token + .expires_at_utc + .as_ref() + .map(|ts| ts.seconds as u64) + .unwrap_or_default(); + + let now = Utc::now().timestamp() as u64; + + let should_refresh_access = + access_token_expiry.checked_sub(now).unwrap_or_default() <= refresh_within_s; + let should_generate_new_tokens = + refresh_token_expiry.checked_sub(now).unwrap_or_default() <= refresh_within_s; + + if should_generate_new_tokens { + let kp = cluster_info.keypair().clone(); + + let (new_access_token, new_refresh_token) = timeout( + *connection_timeout, + generate_auth_tokens(auth_service_client, kp.as_ref()), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("generate_auth_tokens".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))?; + + return Ok((Some(new_access_token), Some(new_refresh_token))); + } else if should_refresh_access { + let new_access_token = timeout( + *connection_timeout, + refresh_access_token(auth_service_client, refresh_token), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("refresh_access_token".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))?; + + return Ok((Some(new_access_token), None)); + } + + Ok((None, None)) +} + +pub async fn refresh_access_token( + auth_service_client: &mut AuthServiceClient<Channel>, + refresh_token: &Token, +) -> crate::proxy::Result<Token> { + let response = auth_service_client + .refresh_access_token(RefreshAccessTokenRequest { + refresh_token: refresh_token.value.clone(), + }) + .await + .map_err(|e| ProxyError::AuthenticationError(e.to_string()))?; + get_validated_token(response.into_inner().access_token) +} + +/// Performs the necessary validations on the auth token before returning it, +/// i.e. it is safe to call .unwrap() on the token fields from the call-site. +/// An invalid token is one where any of its fields are None, or the token itself is None. +fn get_validated_token(maybe_token: Option<Token>) -> crate::proxy::Result<Token> { + let token = maybe_token + .ok_or_else(|| ProxyError::BadAuthenticationToken("received a null token".to_string()))?; + if token.expires_at_utc.is_none() { + Err(ProxyError::BadAuthenticationToken( + "expires_at_utc field is null".to_string(), + )) + } else { + Ok(token) + } +} diff --git a/core/src/proxy/block_engine_stage.rs b/core/src/proxy/block_engine_stage.rs new file mode 100644 index 0000000000..4128f5379f --- /dev/null +++ b/core/src/proxy/block_engine_stage.rs @@ -0,0 +1,533 @@ +//! Maintains a connection to the Block Engine. +//! +//! The Block Engine is responsible for the following: +//! - Acts as a system that sends high-profit bundles and transactions to a validator.
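+//! +//! A rough wiring sketch of how this stage is constructed (variable names are illustrative, not part of this diff): +//! ```ignore +//! let block_engine_stage = BlockEngineStage::new( +//! block_engine_config, // Arc<Mutex<BlockEngineConfig>>, swappable at runtime via admin RPC +//! bundle_tx, // bundles -> bundle stage +//! cluster_info, // identity keypair signs auth challenges +//! packet_tx, // untrusted packets -> sigverify +//! banking_packet_sender, // trusted packets -> banking stage +//! exit, +//! &block_builder_fee_info, +//! ); +//! ```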
+use { + crate::{ + banking_trace::BankingPacketSender, + packet_bundle::PacketBundle, + proto_packet_to_packet, + proxy::{ + auth::{generate_auth_tokens, maybe_refresh_auth_tokens, AuthInterceptor}, + ProxyError, + }, + }, + crossbeam_channel::Sender, + jito_protos::proto::{ + auth::{auth_service_client::AuthServiceClient, Token}, + block_engine::{ + self, block_engine_validator_client::BlockEngineValidatorClient, + BlockBuilderFeeInfoRequest, + }, + }, + solana_gossip::cluster_info::ClusterInfo, + solana_perf::packet::PacketBatch, + solana_sdk::{ + pubkey::Pubkey, saturating_add_assign, signature::Signer, signer::keypair::Keypair, + }, + std::{ + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, + }, + thread::{self, Builder, JoinHandle}, + time::Duration, + }, + tokio::time::{interval, sleep, timeout}, + tonic::{ + codegen::InterceptedService, + transport::{Channel, Endpoint}, + Status, Streaming, + }, +}; + +const CONNECTION_TIMEOUT_S: u64 = 10; +const CONNECTION_BACKOFF_S: u64 = 5; + +#[derive(Default)] +struct BlockEngineStageStats { + num_bundles: u64, + num_bundle_packets: u64, + num_packets: u64, + num_empty_packets: u64, +} + +impl BlockEngineStageStats { + pub(crate) fn report(&self) { + datapoint_info!( + "block_engine_stage-stats", + ("num_bundles", self.num_bundles, i64), + ("num_bundle_packets", self.num_bundle_packets, i64), + ("num_packets", self.num_packets, i64), + ("num_empty_packets", self.num_empty_packets, i64) + ); + } +} + +pub struct BlockBuilderFeeInfo { + pub block_builder: Pubkey, + pub block_builder_commission: u64, +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct BlockEngineConfig { + /// Block Engine URL + pub block_engine_url: String, + + /// If set, the backend is assumed to have verified packets, so signature verification is bypassed in the validator. + pub trust_packets: bool, +} + +pub struct BlockEngineStage { + t_hdls: Vec<JoinHandle<()>>, +} + +impl BlockEngineStage { + pub fn new( + block_engine_config: Arc<Mutex<BlockEngineConfig>>, + // Channel that bundles get piped through. + bundle_tx: Sender<Vec<PacketBundle>>, + // The keypair stored here is used to sign auth challenges. + cluster_info: Arc<ClusterInfo>, + // Channel that non-trusted packets get piped through. + packet_tx: Sender<PacketBatch>, + // Channel that trusted packets get piped through.
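+ // When `trust_packets` is set, sigverify is skipped and packets are sent here instead.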
+ banking_packet_sender: BankingPacketSender, + exit: Arc, + block_builder_fee_info: &Arc>, + ) -> Self { + let block_builder_fee_info = block_builder_fee_info.clone(); + + let thread = Builder::new() + .name("block-engine-stage".to_string()) + .spawn(move || { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + rt.block_on(Self::start( + block_engine_config, + cluster_info, + bundle_tx, + packet_tx, + banking_packet_sender, + exit, + block_builder_fee_info, + )); + }) + .unwrap(); + + Self { + t_hdls: vec![thread], + } + } + + pub fn join(self) -> thread::Result<()> { + for t in self.t_hdls { + t.join()?; + } + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + async fn start( + block_engine_config: Arc>, + cluster_info: Arc, + bundle_tx: Sender>, + packet_tx: Sender, + banking_packet_sender: BankingPacketSender, + exit: Arc, + block_builder_fee_info: Arc>, + ) { + const CONNECTION_TIMEOUT: Duration = Duration::from_secs(CONNECTION_TIMEOUT_S); + const CONNECTION_BACKOFF: Duration = Duration::from_secs(CONNECTION_BACKOFF_S); + let mut error_count: u64 = 0; + + while !exit.load(Ordering::Relaxed) { + // Wait until a valid config is supplied (either initially or by admin rpc) + // Use if!/else here to avoid extra CONNECTION_BACKOFF wait on successful termination + if !Self::is_valid_block_engine_config(&block_engine_config.lock().unwrap()) { + sleep(CONNECTION_BACKOFF).await; + } else if let Err(e) = Self::connect_auth_and_stream( + &block_engine_config, + &cluster_info, + &bundle_tx, + &packet_tx, + &banking_packet_sender, + &exit, + &block_builder_fee_info, + &CONNECTION_TIMEOUT, + ) + .await + { + match e { + // This error is frequent on hot spares, and the parsed string does not work + // with datapoints (incorrect escaping). + ProxyError::AuthenticationPermissionDenied => { + warn!("block engine permission denied. not on leader schedule. ignore if hot-spare.") + } + e => { + error_count += 1; + datapoint_warn!( + "block_engine_stage-proxy_error", + ("count", error_count, i64), + ("error", e.to_string(), String), + ); + } + } + sleep(CONNECTION_BACKOFF).await; + } + } + } + + async fn connect_auth_and_stream( + block_engine_config: &Arc>, + cluster_info: &Arc, + bundle_tx: &Sender>, + packet_tx: &Sender, + banking_packet_sender: &BankingPacketSender, + exit: &Arc, + block_builder_fee_info: &Arc>, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + // Get a copy of configs here in case they have changed at runtime + let keypair = cluster_info.keypair().clone(); + let local_config = block_engine_config.lock().unwrap().clone(); + + let mut backend_endpoint = Endpoint::from_shared(local_config.block_engine_url.clone()) + .map_err(|_| { + ProxyError::BlockEngineConnectionError(format!( + "invalid block engine url value: {}", + local_config.block_engine_url + )) + })? + .tcp_keepalive(Some(Duration::from_secs(60))); + if local_config.block_engine_url.starts_with("https") { + backend_endpoint = backend_endpoint + .tls_config(tonic::transport::ClientTlsConfig::new()) + .map_err(|_| { + ProxyError::BlockEngineConnectionError( + "failed to set tls_config for block engine service".to_string(), + ) + })?; + } + + debug!("connecting to auth: {}", local_config.block_engine_url); + let auth_channel = timeout(*connection_timeout, backend_endpoint.connect()) + .await + .map_err(|_| ProxyError::AuthenticationConnectionTimeout)? 
+ .map_err(|e| ProxyError::AuthenticationConnectionError(e.to_string()))?; + + let mut auth_client = AuthServiceClient::new(auth_channel); + + debug!("generating authentication token"); + let (access_token, refresh_token) = timeout( + *connection_timeout, + generate_auth_tokens(&mut auth_client, &keypair), + ) + .await + .map_err(|_| ProxyError::AuthenticationTimeout)??; + + datapoint_info!( + "block_engine_stage-tokens_generated", + ("url", local_config.block_engine_url, String), + ("count", 1, i64), + ); + + debug!( + "connecting to block engine: {}", + local_config.block_engine_url + ); + let block_engine_channel = timeout(*connection_timeout, backend_endpoint.connect()) + .await + .map_err(|_| ProxyError::BlockEngineConnectionTimeout)? + .map_err(|e| ProxyError::BlockEngineConnectionError(e.to_string()))?; + + let access_token = Arc::new(Mutex::new(access_token)); + let block_engine_client = BlockEngineValidatorClient::with_interceptor( + block_engine_channel, + AuthInterceptor::new(access_token.clone()), + ); + + Self::start_consuming_block_engine_bundles_and_packets( + bundle_tx, + block_engine_client, + packet_tx, + &local_config, + block_engine_config, + banking_packet_sender, + exit, + block_builder_fee_info, + auth_client, + access_token, + refresh_token, + connection_timeout, + keypair, + cluster_info, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + async fn start_consuming_block_engine_bundles_and_packets( + bundle_tx: &Sender>, + mut client: BlockEngineValidatorClient>, + packet_tx: &Sender, + local_config: &BlockEngineConfig, // local copy of config with current connections + global_config: &Arc>, // guarded reference for detecting run-time updates + banking_packet_sender: &BankingPacketSender, + exit: &Arc, + block_builder_fee_info: &Arc>, + auth_client: AuthServiceClient, + access_token: Arc>, + refresh_token: Token, + connection_timeout: &Duration, + keypair: Arc, + cluster_info: &Arc, + ) -> crate::proxy::Result<()> { + let subscribe_packets_stream = timeout( + *connection_timeout, + client.subscribe_packets(block_engine::SubscribePacketsRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("block_engine_subscribe_packets".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? + .into_inner(); + + let subscribe_bundles_stream = timeout( + *connection_timeout, + client.subscribe_bundles(block_engine::SubscribeBundlesRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("subscribe_bundles".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? + .into_inner(); + + let block_builder_info = timeout( + *connection_timeout, + client.get_block_builder_fee_info(BlockBuilderFeeInfoRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("get_block_builder_fee_info".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? 
+ .into_inner(); + + { + let mut bb_fee = block_builder_fee_info.lock().unwrap(); + bb_fee.block_builder_commission = block_builder_info.commission; + bb_fee.block_builder = + Pubkey::from_str(&block_builder_info.pubkey).unwrap_or(bb_fee.block_builder); + } + + Self::consume_bundle_and_packet_stream( + client, + (subscribe_bundles_stream, subscribe_packets_stream), + bundle_tx, + packet_tx, + local_config, + global_config, + banking_packet_sender, + exit, + block_builder_fee_info, + auth_client, + access_token, + refresh_token, + keypair, + cluster_info, + connection_timeout, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + async fn consume_bundle_and_packet_stream( + mut client: BlockEngineValidatorClient>, + (mut bundle_stream, mut packet_stream): ( + Streaming, + Streaming, + ), + bundle_tx: &Sender>, + packet_tx: &Sender, + local_config: &BlockEngineConfig, // local copy of config with current connections + global_config: &Arc>, // guarded reference for detecting run-time updates + banking_packet_sender: &BankingPacketSender, + exit: &Arc, + block_builder_fee_info: &Arc>, + mut auth_client: AuthServiceClient, + access_token: Arc>, + mut refresh_token: Token, + keypair: Arc, + cluster_info: &Arc, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + const METRICS_TICK: Duration = Duration::from_secs(1); + const MAINTENANCE_TICK: Duration = Duration::from_secs(10 * 60); + let refresh_within_s: u64 = METRICS_TICK.as_secs().saturating_mul(3).saturating_div(2); + + let mut num_full_refreshes: u64 = 1; + let mut num_refresh_access_token: u64 = 0; + let mut block_engine_stats = BlockEngineStageStats::default(); + let mut metrics_and_auth_tick = interval(METRICS_TICK); + let mut maintenance_tick = interval(MAINTENANCE_TICK); + + info!("connected to packet and bundle stream"); + + while !exit.load(Ordering::Relaxed) { + tokio::select! 
{ + maybe_msg = packet_stream.message() => { + let resp = maybe_msg?.ok_or(ProxyError::GrpcStreamDisconnected)?; + Self::handle_block_engine_packets(resp, packet_tx, banking_packet_sender, local_config.trust_packets, &mut block_engine_stats)?; + } + maybe_bundles = bundle_stream.message() => { + Self::handle_block_engine_maybe_bundles(maybe_bundles, bundle_tx, &mut block_engine_stats)?; + } + _ = metrics_and_auth_tick.tick() => { + block_engine_stats.report(); + block_engine_stats = BlockEngineStageStats::default(); + + if cluster_info.id() != keypair.pubkey() { + return Err(ProxyError::AuthenticationConnectionError("validator identity changed".to_string())); + } + + if *global_config.lock().unwrap() != *local_config { + return Err(ProxyError::AuthenticationConnectionError("block engine config changed".to_string())); + } + + let (maybe_new_access, maybe_new_refresh) = maybe_refresh_auth_tokens(&mut auth_client, + &access_token, + &refresh_token, + cluster_info, + connection_timeout, + refresh_within_s, + ).await?; + + if let Some(new_token) = maybe_new_access { + num_refresh_access_token += 1; + datapoint_info!( + "block_engine_stage-refresh_access_token", + ("url", &local_config.block_engine_url, String), + ("count", num_refresh_access_token, i64), + ); + *access_token.lock().unwrap() = new_token; + } + if let Some(new_token) = maybe_new_refresh { + num_full_refreshes += 1; + datapoint_info!( + "block_engine_stage-tokens_generated", + ("url", &local_config.block_engine_url, String), + ("count", num_full_refreshes, i64), + ); + refresh_token = new_token; + } + } + _ = maintenance_tick.tick() => { + let block_builder_info = timeout( + *connection_timeout, + client.get_block_builder_fee_info(BlockBuilderFeeInfoRequest{}) + ) + .await + .map_err(|_| ProxyError::MethodTimeout("get_block_builder_fee_info".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? + .into_inner(); + + let mut bb_fee = block_builder_fee_info.lock().unwrap(); + bb_fee.block_builder_commission = block_builder_info.commission; + bb_fee.block_builder = Pubkey::from_str(&block_builder_info.pubkey).unwrap_or(bb_fee.block_builder); + } + } + } + Ok(()) + } + + fn handle_block_engine_maybe_bundles( + maybe_bundles_response: Result, Status>, + bundle_sender: &Sender>, + block_engine_stats: &mut BlockEngineStageStats, + ) -> crate::proxy::Result<()> { + let bundles_response = maybe_bundles_response?.ok_or(ProxyError::GrpcStreamDisconnected)?; + let bundles: Vec = bundles_response + .bundles + .into_iter() + .filter_map(|bundle| { + Some(PacketBundle { + batch: PacketBatch::new( + bundle + .bundle? 
+ + fn handle_block_engine_maybe_bundles( + maybe_bundles_response: Result<Option<block_engine::SubscribeBundlesResponse>, Status>, + bundle_sender: &Sender<Vec<PacketBundle>>, + block_engine_stats: &mut BlockEngineStageStats, + ) -> crate::proxy::Result<()> { + let bundles_response = maybe_bundles_response?.ok_or(ProxyError::GrpcStreamDisconnected)?; + let bundles: Vec<PacketBundle> = bundles_response + .bundles + .into_iter() + .filter_map(|bundle| { + Some(PacketBundle { + batch: PacketBatch::new( + bundle + .bundle? + .packets + .into_iter() + .map(proto_packet_to_packet) + .collect(), + ), + bundle_id: bundle.uuid, + }) + }) + .collect(); + + saturating_add_assign!(block_engine_stats.num_bundles, bundles.len() as u64); + saturating_add_assign!( + block_engine_stats.num_bundle_packets, + bundles.iter().map(|bundle| bundle.batch.len() as u64).sum() + ); + + // NOTE: bundles are sanitized in bundle_sanitizer module + bundle_sender + .send(bundles) + .map_err(|_| ProxyError::PacketForwardError) + } + + fn handle_block_engine_packets( + resp: block_engine::SubscribePacketsResponse, + packet_tx: &Sender<PacketBatch>, + banking_packet_sender: &BankingPacketSender, + trust_packets: bool, + block_engine_stats: &mut BlockEngineStageStats, + ) -> crate::proxy::Result<()> { + if let Some(batch) = resp.batch { + if batch.packets.is_empty() { + saturating_add_assign!(block_engine_stats.num_empty_packets, 1); + return Ok(()); + } + + let packet_batch = PacketBatch::new( + batch + .packets + .into_iter() + .map(proto_packet_to_packet) + .collect(), + ); + + saturating_add_assign!(block_engine_stats.num_packets, packet_batch.len() as u64); + + if trust_packets { + banking_packet_sender + .send(Arc::new((vec![packet_batch], None))) + .map_err(|_| ProxyError::PacketForwardError)?; + } else { + packet_tx + .send(packet_batch) + .map_err(|_| ProxyError::PacketForwardError)?; + } + } else { + saturating_add_assign!(block_engine_stats.num_empty_packets, 1); + } + + Ok(()) + } + + pub fn is_valid_block_engine_config(config: &BlockEngineConfig) -> bool { + if config.block_engine_url.is_empty() { + warn!("can't connect to block_engine. missing block_engine_url."); + return false; + } + if let Err(e) = Endpoint::from_str(&config.block_engine_url) { + error!( + "can't connect to block engine. error creating block engine endpoint - {}", + e.to_string() + ); + return false; + } + true + } +}
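`handle_block_engine_packets` above encodes the `trust_packets` trade-off: trusted packets bypass sigverify and go straight to the banking path. A minimal sketch of that routing decision, using plain crossbeam channels as stand-ins for the real senders (values hypothetical):

```rust
use crossbeam_channel::unbounded;

fn main() {
    // Stand-ins for the real channels: packet_tx feeds the sigverify path,
    // banking_tx models banking_packet_sender, which bypasses it.
    let (packet_tx, _sigverify_rx) = unbounded::<&'static str>();
    let (banking_tx, _banking_rx) = unbounded::<&'static str>();

    let trust_packets = false; // operator-set flag; hypothetical value
    let packet_batch = "batch";

    if trust_packets {
        banking_tx.send(packet_batch).unwrap(); // straight to banking stage
    } else {
        packet_tx.send(packet_batch).unwrap(); // verified like any other TPU packet
    }
}
```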
diff --git a/core/src/proxy/fetch_stage_manager.rs b/core/src/proxy/fetch_stage_manager.rs new file mode 100644 index 0000000000..38471fc512 --- /dev/null +++ b/core/src/proxy/fetch_stage_manager.rs @@ -0,0 +1,170 @@ +use { + crate::proxy::{HeartbeatEvent, ProxyError}, + crossbeam_channel::{select, tick, Receiver, Sender}, + solana_client::connection_cache::Protocol, + solana_gossip::{cluster_info::ClusterInfo, contact_info}, + solana_perf::packet::PacketBatch, + std::{ + net::SocketAddr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread::{self, Builder, JoinHandle}, + time::{Duration, Instant}, + }, +}; + +const HEARTBEAT_TIMEOUT: Duration = Duration::from_millis(1500); // Empirically determined from load testing +const DISCONNECT_DELAY: Duration = Duration::from_secs(60); +const METRICS_CADENCE: Duration = Duration::from_secs(1); + +/// Manages switching between the validator's TPU ports and the proxy's. +/// Switch-overs are triggered by late and missed heartbeats. +pub struct FetchStageManager { + t_hdl: JoinHandle<()>, +} + +impl FetchStageManager { + pub fn new( + // ClusterInfo is used to switch between advertising the proxy's TPU ports and this validator's. + cluster_info: Arc<ClusterInfo>, + // Channel that heartbeats are received from. Entirely responsible for triggering switch-overs. + heartbeat_rx: Receiver<HeartbeatEvent>, + // Channel that packets from FetchStage are intercepted from. + packet_intercept_rx: Receiver<PacketBatch>, + // Intercepted packets get piped through here. + packet_tx: Sender<PacketBatch>, + exit: Arc<AtomicBool>, + ) -> Self { + let t_hdl = Self::start( + cluster_info, + heartbeat_rx, + packet_intercept_rx, + packet_tx, + exit, + ); + + Self { t_hdl } + } + + /// Disconnect fetch behaviour + /// Starts connected + /// When connected and a packet is received, forward it + /// When disconnected, packet is dropped + /// When receiving heartbeat while connected and not pending disconnect + /// Sets pending_disconnect to true and records time + /// When receiving heartbeat while connected, and pending for > DISCONNECT_DELAY + /// Sets fetch_connected to false, pending_disconnect to false + /// Advertises TPU ports sent in heartbeat + /// When tick is received without heartbeat_received + /// Sets fetch_connected to true, pending_disconnect to false + /// Advertises saved contact info + fn start( + cluster_info: Arc<ClusterInfo>, + heartbeat_rx: Receiver<HeartbeatEvent>, + packet_intercept_rx: Receiver<PacketBatch>, + packet_tx: Sender<PacketBatch>, + exit: Arc<AtomicBool>, + ) -> JoinHandle<()> { + Builder::new().name("fetch-stage-manager".into()).spawn(move || { + let my_fallback_contact_info = cluster_info.my_contact_info(); + + let mut fetch_connected = true; + let mut heartbeat_received = false; + let mut pending_disconnect = false; + + let mut pending_disconnect_ts = Instant::now(); + + let heartbeat_tick = tick(HEARTBEAT_TIMEOUT); + let metrics_tick = tick(METRICS_CADENCE); + let mut packets_forwarded = 0; + let mut heartbeats_received = 0; + loop { + select! { + recv(packet_intercept_rx) -> pkt => { + match pkt { + Ok(pkt) => { + if fetch_connected { + if packet_tx.send(pkt).is_err() { + error!("{:?}", ProxyError::PacketForwardError); + return; + } + packets_forwarded += 1; + } + } + Err(_) => { + warn!("packet intercept receiver disconnected, shutting down"); + return; + } + } + } + recv(heartbeat_tick) -> _ => { + if exit.load(Ordering::Relaxed) { + break; + } + if !heartbeat_received && (!fetch_connected || pending_disconnect) { + warn!("heartbeat late, reconnecting fetch stage"); + fetch_connected = true; + pending_disconnect = false; + + // unwrap safe here bc contact_info.tpu(Protocol::QUIC) and contact_info.tpu_forwards(Protocol::QUIC) + // are checked on startup + if let Err(e) = Self::set_tpu_addresses(&cluster_info, my_fallback_contact_info.tpu(Protocol::QUIC).unwrap(), my_fallback_contact_info.tpu_forwards(Protocol::QUIC).unwrap()) { + error!("error setting tpu or tpu_fwd to ({:?}, {:?}), error: {:?}", my_fallback_contact_info.tpu(Protocol::QUIC).unwrap(), my_fallback_contact_info.tpu_forwards(Protocol::QUIC).unwrap(), e); + } + heartbeats_received = 0; + } + heartbeat_received = false; + } + recv(heartbeat_rx) -> tpu_info => { + if let Ok((tpu_addr, tpu_forward_addr)) = tpu_info { + heartbeats_received += 1; + heartbeat_received = true; + if fetch_connected && !pending_disconnect { + info!("received heartbeat while fetch stage connected, pending disconnect after delay"); + pending_disconnect_ts = Instant::now(); + pending_disconnect = true; + } + if fetch_connected && pending_disconnect && pending_disconnect_ts.elapsed() > DISCONNECT_DELAY { + info!("disconnecting fetch stage"); + fetch_connected = false; + pending_disconnect = false; + if let Err(e) = Self::set_tpu_addresses(&cluster_info, tpu_addr, tpu_forward_addr) { + error!("error setting tpu or tpu_fwd to ({:?}, {:?}), error: {:?}", tpu_addr, tpu_forward_addr, e); + } + } + } else { + warn!("relayer heartbeat receiver disconnected, shutting down"); + return; + } + } + recv(metrics_tick) -> _ => { + datapoint_info!( + "relayer-heartbeat", + ("fetch_stage_packets_forwarded", packets_forwarded, i64), + ("heartbeats_received", heartbeats_received, i64), + ); + } + } + } + }).unwrap() + } + + fn set_tpu_addresses( + cluster_info: &Arc<ClusterInfo>, + tpu_address: SocketAddr, + tpu_forward_address: SocketAddr, + ) -> Result<(), contact_info::Error> { + cluster_info.set_tpu(tpu_address)?; + cluster_info.set_tpu_forwards(tpu_forward_address)?; + Ok(()) + } + + pub fn join(self) -> thread::Result<()> { + self.t_hdl.join() + } +}
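The doc comment on `start` describes a three-state watchdog, but the logic is spread across `select!` arms. A compressed, standalone model of the same transitions may help; this is an illustration of the behaviour, not the stage itself:

```rust
use std::time::{Duration, Instant};

const DISCONNECT_DELAY: Duration = Duration::from_secs(60);

// Models the switch-over states: connected, pending disconnect, disconnected.
struct SwitchState {
    fetch_connected: bool,    // true => validator advertises its own TPU ports
    pending_disconnect: bool, // heartbeats arriving, waiting out DISCONNECT_DELAY
    pending_since: Instant,
}

impl SwitchState {
    // Called for every heartbeat received from the relayer.
    fn on_heartbeat(&mut self) -> Option<&'static str> {
        if self.fetch_connected && !self.pending_disconnect {
            self.pending_disconnect = true;
            self.pending_since = Instant::now();
        } else if self.fetch_connected && self.pending_since.elapsed() > DISCONNECT_DELAY {
            self.fetch_connected = false;
            self.pending_disconnect = false;
            return Some("advertise relayer TPU ports"); // set_tpu_addresses(heartbeat addrs)
        }
        None
    }

    // Called when a HEARTBEAT_TIMEOUT tick fires with no heartbeat seen.
    fn on_missed_heartbeat(&mut self) -> Option<&'static str> {
        if !self.fetch_connected || self.pending_disconnect {
            self.fetch_connected = true;
            self.pending_disconnect = false;
            return Some("advertise local TPU ports"); // set_tpu_addresses(fallback addrs)
        }
        None
    }
}

fn main() {
    let mut state = SwitchState {
        fetch_connected: true,
        pending_disconnect: false,
        pending_since: Instant::now(),
    };
    state.on_heartbeat(); // first heartbeat only arms the pending disconnect
    assert!(state.pending_disconnect);
    state.on_missed_heartbeat(); // relayer goes quiet: fall back to local ports
    assert!(state.fetch_connected && !state.pending_disconnect);
}
```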
("fetch_stage_packets_forwarded", packets_forwarded, i64), + ("heartbeats_received", heartbeats_received, i64), + ); + + } + } + } + }).unwrap() + } + + fn set_tpu_addresses( + cluster_info: &Arc, + tpu_address: SocketAddr, + tpu_forward_address: SocketAddr, + ) -> Result<(), contact_info::Error> { + cluster_info.set_tpu(tpu_address)?; + cluster_info.set_tpu_forwards(tpu_forward_address)?; + Ok(()) + } + + pub fn join(self) -> thread::Result<()> { + self.t_hdl.join() + } +} diff --git a/core/src/proxy/mod.rs b/core/src/proxy/mod.rs new file mode 100644 index 0000000000..86d48482aa --- /dev/null +++ b/core/src/proxy/mod.rs @@ -0,0 +1,100 @@ +//! This module contains logic for connecting to an external Relayer and Block Engine. +//! The Relayer acts as an external TPU and TPU Forward socket while the Block Engine +//! is tasked with streaming high value bundles to the validator. The validator can run +//! in one of 3 modes: +//! 1. Connected to Relayer and Block Engine. +//! - This is the ideal mode as it increases the probability of building the most profitable blocks. +//! 2. Connected only to Relayer. +//! - A validator may choose to run in this mode if the main concern is to offload ingress traffic deduplication and sig-verification. +//! 3. Connected only to Block Engine. +//! - Running in this mode means pending transactions are not exposed to external actors. This mode is ideal if the validator wishes +//! to accept bundles while maintaining some level of privacy for in-flight transactions. + +mod auth; +pub mod block_engine_stage; +pub mod fetch_stage_manager; +pub mod relayer_stage; + +use { + std::{ + net::{AddrParseError, SocketAddr}, + result, + }, + thiserror::Error, + tonic::Status, +}; + +type Result = result::Result; +type HeartbeatEvent = (SocketAddr, SocketAddr); + +#[derive(Error, Debug)] +pub enum ProxyError { + #[error("grpc error: {0}")] + GrpcError(#[from] Status), + + #[error("stream disconnected")] + GrpcStreamDisconnected, + + #[error("heartbeat error")] + HeartbeatChannelError, + + #[error("heartbeat expired")] + HeartbeatExpired, + + #[error("error forwarding packet to banking stage")] + PacketForwardError, + + #[error("missing tpu config: {0:?}")] + MissingTpuSocket(String), + + #[error("invalid socket address: {0:?}")] + InvalidSocketAddress(#[from] AddrParseError), + + #[error("invalid gRPC data: {0:?}")] + InvalidData(String), + + #[error("timeout: {0:?}")] + ConnectionError(#[from] tonic::transport::Error), + + #[error("AuthenticationConnectionTimeout")] + AuthenticationConnectionTimeout, + + #[error("AuthenticationTimeout")] + AuthenticationTimeout, + + #[error("AuthenticationConnectionError: {0:?}")] + AuthenticationConnectionError(String), + + #[error("BlockEngineConnectionTimeout")] + BlockEngineConnectionTimeout, + + #[error("BlockEngineTimeout")] + BlockEngineTimeout, + + #[error("BlockEngineConnectionError: {0:?}")] + BlockEngineConnectionError(String), + + #[error("RelayerConnectionTimeout")] + RelayerConnectionTimeout, + + #[error("RelayerTimeout")] + RelayerEngineTimeout, + + #[error("RelayerConnectionError: {0:?}")] + RelayerConnectionError(String), + + #[error("AuthenticationError: {0:?}")] + AuthenticationError(String), + + #[error("AuthenticationPermissionDenied")] + AuthenticationPermissionDenied, + + #[error("BadAuthenticationToken: {0:?}")] + BadAuthenticationToken(String), + + #[error("MethodTimeout: {0:?}")] + MethodTimeout(String), + + #[error("MethodError: {0:?}")] + MethodError(String), +} diff --git a/core/src/proxy/relayer_stage.rs 
diff --git a/core/src/proxy/relayer_stage.rs b/core/src/proxy/relayer_stage.rs new file mode 100644 index 0000000000..3c754fb9e4 --- /dev/null +++ b/core/src/proxy/relayer_stage.rs @@ -0,0 +1,495 @@ +//! Maintains a connection to the Relayer. +//! +//! The external Relayer is responsible for the following: +//! - Acts as a TPU proxy. +//! - Sends transactions to the validator. +//! - Does not forward bundles, to avoid a DOS vector. +//! - When the validator connects, it changes its TPU and TPU forward address to the relayer. +//! - Expected to send a heartbeat to the validator as a watchdog. If the watchdog times out, the validator +//! disconnects and reverts the TPU and TPU forward settings. + +use { + crate::{ + banking_trace::BankingPacketSender, + proto_packet_to_packet, + proxy::{ + auth::{generate_auth_tokens, maybe_refresh_auth_tokens, AuthInterceptor}, + HeartbeatEvent, ProxyError, + }, + }, + crossbeam_channel::Sender, + jito_protos::proto::{ + auth::{auth_service_client::AuthServiceClient, Token}, + relayer::{self, relayer_client::RelayerClient}, + }, + solana_gossip::cluster_info::ClusterInfo, + solana_perf::packet::PacketBatch, + solana_sdk::{ + saturating_add_assign, + signature::{Keypair, Signer}, + }, + std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, + }, + thread::{self, Builder, JoinHandle}, + time::{Duration, Instant}, + }, + tokio::time::{interval, sleep, timeout}, + tonic::{ + codegen::InterceptedService, + transport::{Channel, Endpoint}, + Streaming, + }, +}; + +const CONNECTION_TIMEOUT_S: u64 = 10; +const CONNECTION_BACKOFF_S: u64 = 5; + +#[derive(Default)] +struct RelayerStageStats { + num_empty_messages: u64, + num_packets: u64, + num_heartbeats: u64, +} + +impl RelayerStageStats { + pub(crate) fn report(&self) { + datapoint_info!( + "relayer_stage-stats", + ("num_empty_messages", self.num_empty_messages, i64), + ("num_packets", self.num_packets, i64), + ("num_heartbeats", self.num_heartbeats, i64), + ); + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct RelayerConfig { + /// Relayer URL + pub relayer_url: String, + + /// Interval at which heartbeats are expected. + pub expected_heartbeat_interval: Duration, + + /// The max tolerable age of the last heartbeat. + pub oldest_allowed_heartbeat: Duration, + + /// If set, the backend is assumed to have verified packets, so signature verification is bypassed in the validator. + pub trust_packets: bool, +} + +pub struct RelayerStage { + t_hdls: Vec<JoinHandle<()>>, +} + +impl RelayerStage { + pub fn new( + relayer_config: Arc<Mutex<RelayerConfig>>, + // The keypair stored here is used to sign auth challenges. + cluster_info: Arc<ClusterInfo>, + // Channel that server-sent heartbeats are piped through. + heartbeat_tx: Sender<HeartbeatEvent>, + // Channel that non-trusted streamed packets are piped through. + packet_tx: Sender<PacketBatch>, + // Channel that trusted streamed packets are piped through.
+ banking_packet_sender: BankingPacketSender, + exit: Arc<AtomicBool>, + ) -> Self { + let thread = Builder::new() + .name("relayer-stage".to_string()) + .spawn(move || { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + + rt.block_on(Self::start( + relayer_config, + cluster_info, + heartbeat_tx, + packet_tx, + banking_packet_sender, + exit, + )); + }) + .unwrap(); + + Self { + t_hdls: vec![thread], + } + } + + pub fn join(self) -> thread::Result<()> { + for t in self.t_hdls { + t.join()?; + } + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + async fn start( + relayer_config: Arc<Mutex<RelayerConfig>>, + cluster_info: Arc<ClusterInfo>, + heartbeat_tx: Sender<HeartbeatEvent>, + packet_tx: Sender<PacketBatch>, + banking_packet_sender: BankingPacketSender, + exit: Arc<AtomicBool>, + ) { + const CONNECTION_TIMEOUT: Duration = Duration::from_secs(CONNECTION_TIMEOUT_S); + const CONNECTION_BACKOFF: Duration = Duration::from_secs(CONNECTION_BACKOFF_S); + + let mut error_count: u64 = 0; + + while !exit.load(Ordering::Relaxed) { + // Wait until a valid config is supplied (either initially or by admin rpc) + // Use if/else here to avoid extra CONNECTION_BACKOFF wait on successful termination + if !Self::is_valid_relayer_config(&relayer_config.lock().unwrap()) { + sleep(CONNECTION_BACKOFF).await; + } else if let Err(e) = Self::connect_auth_and_stream( + &relayer_config, + &cluster_info, + &heartbeat_tx, + &packet_tx, + &banking_packet_sender, + &exit, + &CONNECTION_TIMEOUT, + ) + .await + { + match e { + // This error is frequent on hot spares, and the parsed string does not work + // with datapoints (incorrect escaping). + ProxyError::AuthenticationPermissionDenied => { + warn!("relayer permission denied. not on leader schedule. ignore if hot-spare.") + } + e => { + error_count += 1; + datapoint_warn!( + "relayer_stage-proxy_error", + ("count", error_count, i64), + ("error", e.to_string(), String), + ); + } + } + sleep(CONNECTION_BACKOFF).await; + } + } + } + + async fn connect_auth_and_stream( + relayer_config: &Arc<Mutex<RelayerConfig>>, + cluster_info: &Arc<ClusterInfo>, + heartbeat_tx: &Sender<HeartbeatEvent>, + packet_tx: &Sender<PacketBatch>, + banking_packet_sender: &BankingPacketSender, + exit: &Arc<AtomicBool>, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + // Get a copy of configs here in case they have changed at runtime + let keypair = cluster_info.keypair().clone(); + let local_config = relayer_config.lock().unwrap().clone(); + + let mut backend_endpoint = Endpoint::from_shared(local_config.relayer_url.clone()) + .map_err(|_| { + ProxyError::RelayerConnectionError(format!( + "invalid relayer url value: {}", + local_config.relayer_url + )) + })? + .tcp_keepalive(Some(Duration::from_secs(60))); + if local_config.relayer_url.starts_with("https") { + backend_endpoint = backend_endpoint + .tls_config(tonic::transport::ClientTlsConfig::new()) + .map_err(|_| { + ProxyError::RelayerConnectionError( + "failed to set tls_config for relayer service".to_string(), + ) + })?; + } + + debug!("connecting to auth: {}", local_config.relayer_url); + let auth_channel = timeout(*connection_timeout, backend_endpoint.connect()) + .await + .map_err(|_| ProxyError::AuthenticationConnectionTimeout)?
+ .map_err(|e| ProxyError::AuthenticationConnectionError(e.to_string()))?; + + let mut auth_client = AuthServiceClient::new(auth_channel); + + debug!("generating authentication token"); + let (access_token, refresh_token) = timeout( + *connection_timeout, + generate_auth_tokens(&mut auth_client, &keypair), + ) + .await + .map_err(|_| ProxyError::AuthenticationTimeout)??; + + datapoint_info!( + "relayer_stage-tokens_generated", + ("url", local_config.relayer_url, String), + ("count", 1, i64), + ); + + debug!("connecting to relayer: {}", local_config.relayer_url); + let relayer_channel = timeout(*connection_timeout, backend_endpoint.connect()) + .await + .map_err(|_| ProxyError::RelayerConnectionTimeout)? + .map_err(|e| ProxyError::RelayerConnectionError(e.to_string()))?; + + let access_token = Arc::new(Mutex::new(access_token)); + let relayer_client = RelayerClient::with_interceptor( + relayer_channel, + AuthInterceptor::new(access_token.clone()), + ); + + Self::start_consuming_relayer_packets( + relayer_client, + heartbeat_tx, + packet_tx, + banking_packet_sender, + &local_config, + relayer_config, + exit, + auth_client, + access_token, + refresh_token, + keypair, + cluster_info, + connection_timeout, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + async fn start_consuming_relayer_packets( + mut client: RelayerClient<InterceptedService<Channel, AuthInterceptor>>, + heartbeat_tx: &Sender<HeartbeatEvent>, + packet_tx: &Sender<PacketBatch>, + banking_packet_sender: &BankingPacketSender, + local_config: &RelayerConfig, // local copy of config with current connections + global_config: &Arc<Mutex<RelayerConfig>>, // guarded reference for detecting run-time updates + exit: &Arc<AtomicBool>, + auth_client: AuthServiceClient<Channel>, + access_token: Arc<Mutex<Token>>, + refresh_token: Token, + keypair: Arc<Keypair>, + cluster_info: &Arc<ClusterInfo>, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + let heartbeat_event: HeartbeatEvent = { + let tpu_config = timeout( + *connection_timeout, + client.get_tpu_configs(relayer::GetTpuConfigsRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("relayer_get_tpu_configs".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? + .into_inner(); + + let tpu_addr = tpu_config + .tpu + .ok_or_else(|| ProxyError::MissingTpuSocket("tpu".to_string()))?; + let tpu_forward_addr = tpu_config + .tpu_forward + .ok_or_else(|| ProxyError::MissingTpuSocket("tpu_fwd".to_string()))?; + + let tpu_ip = IpAddr::from(tpu_addr.ip.parse::<Ipv4Addr>()?); + let tpu_forward_ip = IpAddr::from(tpu_forward_addr.ip.parse::<Ipv4Addr>()?); + + let tpu_socket = SocketAddr::new(tpu_ip, tpu_addr.port as u16); + let tpu_forward_socket = SocketAddr::new(tpu_forward_ip, tpu_forward_addr.port as u16); + (tpu_socket, tpu_forward_socket) + }; + + let packet_stream = timeout( + *connection_timeout, + client.subscribe_packets(relayer::SubscribePacketsRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("relayer_subscribe_packets".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))?
+ .into_inner(); + + Self::consume_packet_stream( + heartbeat_event, + heartbeat_tx, + packet_stream, + packet_tx, + local_config, + global_config, + banking_packet_sender, + exit, + auth_client, + access_token, + refresh_token, + keypair, + cluster_info, + connection_timeout, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + async fn consume_packet_stream( + heartbeat_event: HeartbeatEvent, + heartbeat_tx: &Sender<HeartbeatEvent>, + mut packet_stream: Streaming<relayer::SubscribePacketsResponse>, + packet_tx: &Sender<PacketBatch>, + local_config: &RelayerConfig, // local copy of config with current connections + global_config: &Arc<Mutex<RelayerConfig>>, // guarded reference for detecting run-time updates + banking_packet_sender: &BankingPacketSender, + exit: &Arc<AtomicBool>, + mut auth_client: AuthServiceClient<Channel>, + access_token: Arc<Mutex<Token>>, + mut refresh_token: Token, + keypair: Arc<Keypair>, + cluster_info: &Arc<ClusterInfo>, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + const METRICS_TICK: Duration = Duration::from_secs(1); + let refresh_within_s: u64 = METRICS_TICK.as_secs().saturating_mul(3).saturating_div(2); + + let mut relayer_stats = RelayerStageStats::default(); + let mut metrics_and_auth_tick = interval(METRICS_TICK); + + let mut num_full_refreshes: u64 = 1; + let mut num_refresh_access_token: u64 = 0; + + let mut heartbeat_check_interval = interval(local_config.expected_heartbeat_interval); + let mut last_heartbeat_ts = Instant::now(); + + info!("connected to packet stream"); + + while !exit.load(Ordering::Relaxed) { + tokio::select! { + maybe_msg = packet_stream.message() => { + let resp = maybe_msg?.ok_or(ProxyError::GrpcStreamDisconnected)?; + Self::handle_relayer_packets(resp, heartbeat_event, heartbeat_tx, &mut last_heartbeat_ts, packet_tx, local_config.trust_packets, banking_packet_sender, &mut relayer_stats)?; + } + _ = heartbeat_check_interval.tick() => { + if last_heartbeat_ts.elapsed() > local_config.oldest_allowed_heartbeat { + return Err(ProxyError::HeartbeatExpired); + } + } + _ = metrics_and_auth_tick.tick() => { + relayer_stats.report(); + relayer_stats = RelayerStageStats::default(); + + if cluster_info.id() != keypair.pubkey() { + return Err(ProxyError::AuthenticationConnectionError("validator identity changed".to_string())); + } + + if *global_config.lock().unwrap() != *local_config { + return Err(ProxyError::AuthenticationConnectionError("relayer config changed".to_string())); + } + + let (maybe_new_access, maybe_new_refresh) = maybe_refresh_auth_tokens(&mut auth_client, + &access_token, + &refresh_token, + cluster_info, + connection_timeout, + refresh_within_s, + ).await?; + + if let Some(new_token) = maybe_new_access { + num_refresh_access_token += 1; + datapoint_info!( + "relayer_stage-refresh_access_token", + ("url", &local_config.relayer_url, String), + ("count", num_refresh_access_token, i64), + ); + *access_token.lock().unwrap() = new_token; + } + if let Some(new_token) = maybe_new_refresh { + num_full_refreshes += 1; + datapoint_info!( + "relayer_stage-tokens_generated", + ("url", &local_config.relayer_url, String), + ("count", num_full_refreshes, i64), + ); + refresh_token = new_token; + } + } + } + } + Ok(()) + } + + fn handle_relayer_packets( + subscribe_packets_resp: relayer::SubscribePacketsResponse, + heartbeat_event: HeartbeatEvent, + heartbeat_tx: &Sender<HeartbeatEvent>, + last_heartbeat_ts: &mut Instant, + packet_tx: &Sender<PacketBatch>, + trust_packets: bool, + banking_packet_sender: &BankingPacketSender, + relayer_stats: &mut RelayerStageStats, + ) -> crate::proxy::Result<()> { + match subscribe_packets_resp.msg { + None => {
saturating_add_assign!(relayer_stats.num_empty_messages, 1); + } + Some(relayer::subscribe_packets_response::Msg::Batch(proto_batch)) => { + if proto_batch.packets.is_empty() { + saturating_add_assign!(relayer_stats.num_empty_messages, 1); + return Ok(()); + } + + let packet_batch = PacketBatch::new( + proto_batch + .packets + .into_iter() + .map(proto_packet_to_packet) + .collect(), + ); + + saturating_add_assign!(relayer_stats.num_packets, packet_batch.len() as u64); + + if trust_packets { + banking_packet_sender + .send(Arc::new((vec![packet_batch], None))) + .map_err(|_| ProxyError::PacketForwardError)?; + } else { + packet_tx + .send(packet_batch) + .map_err(|_| ProxyError::PacketForwardError)?; + } + } + Some(relayer::subscribe_packets_response::Msg::Heartbeat(_)) => { + saturating_add_assign!(relayer_stats.num_heartbeats, 1); + + *last_heartbeat_ts = Instant::now(); + heartbeat_tx + .send(heartbeat_event) + .map_err(|_| ProxyError::HeartbeatChannelError)?; + } + } + Ok(()) + } + + pub fn is_valid_relayer_config(config: &RelayerConfig) -> bool { + if config.relayer_url.is_empty() { + warn!("can't connect to relayer. missing relayer_url."); + return false; + } + if config.oldest_allowed_heartbeat.is_zero() { + error!("can't connect to relayer. oldest allowed heartbeat must be greater than 0."); + return false; + } + if config.expected_heartbeat_interval.is_zero() { + error!("can't connect to relayer. expected heartbeat interval must be greater than 0."); + return false; + } + if let Err(e) = Endpoint::from_str(&config.relayer_url) { + error!( + "can't connect to relayer. error creating relayer endpoint - {}", + e.to_string() + ); + return false; + } + true + } +}
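Between `consume_packet_stream` and `is_valid_relayer_config`, the heartbeat contract is: a heartbeat should arrive at least every `expected_heartbeat_interval`, and the stage tears down once the last one is older than `oldest_allowed_heartbeat`. A detached sketch of that watchdog relationship, with hypothetical values:

```rust
use std::time::{Duration, Instant};

fn main() {
    // Hypothetical operator settings: expect a heartbeat every 500ms,
    // tolerate up to 1.5s of silence before reconnecting.
    let oldest_allowed_heartbeat = Duration::from_millis(1500);

    let last_heartbeat_ts = Instant::now(); // updated on every Msg::Heartbeat

    // Evaluated on each expected_heartbeat_interval tick, as in
    // consume_packet_stream above.
    if last_heartbeat_ts.elapsed() > oldest_allowed_heartbeat {
        // Corresponds to returning ProxyError::HeartbeatExpired: the stage
        // reconnects, and FetchStageManager falls back to the local TPU ports.
        eprintln!("heartbeat expired, reconnecting");
    }
}
```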
diff --git a/core/src/tip_manager.rs b/core/src/tip_manager.rs new file mode 100644 index 0000000000..724abbbe02 --- /dev/null +++ b/core/src/tip_manager.rs @@ -0,0 +1,583 @@ +use { + crate::proxy::block_engine_stage::BlockBuilderFeeInfo, + anchor_lang::{ + solana_program::hash::Hash, AccountDeserialize, InstructionData, ToAccountMetas, + }, + jito_tip_distribution::sdk::{ + derive_config_account_address, derive_tip_distribution_account_address, + instruction::{ + initialize_ix, initialize_tip_distribution_account_ix, InitializeAccounts, + InitializeArgs, InitializeTipDistributionAccountAccounts, + InitializeTipDistributionAccountArgs, + }, + }, + jito_tip_payment::{ + Config, InitBumps, TipPaymentAccount, CONFIG_ACCOUNT_SEED, TIP_ACCOUNT_SEED_0, + TIP_ACCOUNT_SEED_1, TIP_ACCOUNT_SEED_2, TIP_ACCOUNT_SEED_3, TIP_ACCOUNT_SEED_4, + TIP_ACCOUNT_SEED_5, TIP_ACCOUNT_SEED_6, TIP_ACCOUNT_SEED_7, + }, + log::warn, + solana_bundle::TipError, + solana_runtime::bank::Bank, + solana_sdk::{ + account::ReadableAccount, + bundle::{derive_bundle_id_from_sanitized_transactions, SanitizedBundle}, + instruction::Instruction, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + stake_history::Epoch, + system_program, + transaction::{SanitizedTransaction, Transaction}, + }, + std::{collections::HashSet, sync::Arc}, +}; + +pub type Result<T> = std::result::Result<T, TipError>; + +#[derive(Debug, Clone)] +struct TipPaymentProgramInfo { + program_id: Pubkey, + + config_pda_bump: (Pubkey, u8), + tip_pda_0: (Pubkey, u8), + tip_pda_1: (Pubkey, u8), + tip_pda_2: (Pubkey, u8), + tip_pda_3: (Pubkey, u8), + tip_pda_4: (Pubkey, u8), + tip_pda_5: (Pubkey, u8), + tip_pda_6: (Pubkey, u8), + tip_pda_7: (Pubkey, u8), +} + +/// Contains metadata regarding the tip-distribution account. +/// The PDAs contained in this struct are presumed to be owned by the program. +#[derive(Debug, Clone)] +struct TipDistributionProgramInfo { + /// The tip-distribution program_id. + program_id: Pubkey, + + /// Singleton [Config] PDA and bump tuple. + config_pda_and_bump: (Pubkey, u8), +} + +/// This config is used on each invocation to the `initialize_tip_distribution_account` instruction. +#[derive(Debug, Clone)] +pub struct TipDistributionAccountConfig { + /// The account with authority to upload merkle-roots to this validator's [TipDistributionAccount]. + pub merkle_root_upload_authority: Pubkey, + + /// This validator's vote account. + pub vote_account: Pubkey, + + /// This validator's commission rate BPS for tips in the [TipDistributionAccount]. + pub commission_bps: u16, +} + +impl Default for TipDistributionAccountConfig { + fn default() -> Self { + Self { + merkle_root_upload_authority: Pubkey::new_unique(), + vote_account: Pubkey::new_unique(), + commission_bps: 0, + } + } +} + +#[derive(Debug, Clone)] +pub struct TipManager { + tip_payment_program_info: TipPaymentProgramInfo, + tip_distribution_program_info: TipDistributionProgramInfo, + tip_distribution_account_config: TipDistributionAccountConfig, +} + +#[derive(Clone)] +pub struct TipManagerConfig { + pub tip_payment_program_id: Pubkey, + pub tip_distribution_program_id: Pubkey, + pub tip_distribution_account_config: TipDistributionAccountConfig, +} + +impl Default for TipManagerConfig { + fn default() -> Self { + TipManagerConfig { + tip_payment_program_id: Pubkey::new_unique(), + tip_distribution_program_id: Pubkey::new_unique(), + tip_distribution_account_config: TipDistributionAccountConfig::default(), + } + } +} + +impl TipManager { + pub fn new(config: TipManagerConfig) -> TipManager { + let TipManagerConfig { + tip_payment_program_id, + tip_distribution_program_id, + tip_distribution_account_config, + } = config; + + let config_pda_bump = + Pubkey::find_program_address(&[CONFIG_ACCOUNT_SEED], &tip_payment_program_id); + + let tip_pda_0 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_0], &tip_payment_program_id); + let tip_pda_1 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_1], &tip_payment_program_id); + let tip_pda_2 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_2], &tip_payment_program_id); + let tip_pda_3 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_3], &tip_payment_program_id); + let tip_pda_4 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_4], &tip_payment_program_id); + let tip_pda_5 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_5], &tip_payment_program_id); + let tip_pda_6 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_6], &tip_payment_program_id); + let tip_pda_7 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_7], &tip_payment_program_id); + + let config_pda_and_bump = derive_config_account_address(&tip_distribution_program_id); + + TipManager { + tip_payment_program_info: TipPaymentProgramInfo { + program_id: tip_payment_program_id, + config_pda_bump, + tip_pda_0, + tip_pda_1, + tip_pda_2, + tip_pda_3, + tip_pda_4, + tip_pda_5, + tip_pda_6, + tip_pda_7, + }, + tip_distribution_program_info: TipDistributionProgramInfo { + program_id: tip_distribution_program_id, + config_pda_and_bump, + }, + tip_distribution_account_config, + } + } + + pub fn tip_payment_program_id(&self) -> Pubkey { + self.tip_payment_program_info.program_id + } + + pub fn tip_distribution_program_id(&self) -> Pubkey { + self.tip_distribution_program_info.program_id + } + + /// Returns the [Config] account owned by the tip-payment program.
+ pub fn tip_payment_config_pubkey(&self) -> Pubkey { + self.tip_payment_program_info.config_pda_bump.0 + } + + /// Returns the [Config] account owned by the tip-distribution program. + pub fn tip_distribution_config_pubkey(&self) -> Pubkey { + self.tip_distribution_program_info.config_pda_and_bump.0 + } + + /// Given a bank, returns the current `tip_receiver` configured with the tip-payment program. + pub fn get_configured_tip_receiver(&self, bank: &Bank) -> Result<Pubkey> { + Ok(self.get_tip_payment_config_account(bank)?.tip_receiver) + } + + pub fn get_tip_accounts(&self) -> HashSet<Pubkey> { + HashSet::from([ + self.tip_payment_program_info.tip_pda_0.0, + self.tip_payment_program_info.tip_pda_1.0, + self.tip_payment_program_info.tip_pda_2.0, + self.tip_payment_program_info.tip_pda_3.0, + self.tip_payment_program_info.tip_pda_4.0, + self.tip_payment_program_info.tip_pda_5.0, + self.tip_payment_program_info.tip_pda_6.0, + self.tip_payment_program_info.tip_pda_7.0, + ]) + } + + pub fn get_tip_payment_config_account(&self, bank: &Bank) -> Result<Config> { + let config_data = bank + .get_account(&self.tip_payment_program_info.config_pda_bump.0) + .ok_or(TipError::AccountMissing( + self.tip_payment_program_info.config_pda_bump.0, + ))?; + + Ok(Config::try_deserialize(&mut config_data.data())?) + } + + /// Only called once during contract creation. + pub fn initialize_tip_payment_program_tx( + &self, + recent_blockhash: Hash, + keypair: &Keypair, + ) -> SanitizedTransaction { + let init_ix = Instruction { + program_id: self.tip_payment_program_info.program_id, + data: jito_tip_payment::instruction::Initialize { + _bumps: InitBumps { + config: self.tip_payment_program_info.config_pda_bump.1, + tip_payment_account_0: self.tip_payment_program_info.tip_pda_0.1, + tip_payment_account_1: self.tip_payment_program_info.tip_pda_1.1, + tip_payment_account_2: self.tip_payment_program_info.tip_pda_2.1, + tip_payment_account_3: self.tip_payment_program_info.tip_pda_3.1, + tip_payment_account_4: self.tip_payment_program_info.tip_pda_4.1, + tip_payment_account_5: self.tip_payment_program_info.tip_pda_5.1, + tip_payment_account_6: self.tip_payment_program_info.tip_pda_6.1, + tip_payment_account_7: self.tip_payment_program_info.tip_pda_7.1, + }, + } + .data(), + accounts: jito_tip_payment::accounts::Initialize { + config: self.tip_payment_program_info.config_pda_bump.0, + tip_payment_account_0: self.tip_payment_program_info.tip_pda_0.0, + tip_payment_account_1: self.tip_payment_program_info.tip_pda_1.0, + tip_payment_account_2: self.tip_payment_program_info.tip_pda_2.0, + tip_payment_account_3: self.tip_payment_program_info.tip_pda_3.0, + tip_payment_account_4: self.tip_payment_program_info.tip_pda_4.0, + tip_payment_account_5: self.tip_payment_program_info.tip_pda_5.0, + tip_payment_account_6: self.tip_payment_program_info.tip_pda_6.0, + tip_payment_account_7: self.tip_payment_program_info.tip_pda_7.0, + system_program: system_program::id(), + payer: keypair.pubkey(), + } + .to_account_metas(None), + }; + SanitizedTransaction::try_from_legacy_transaction(Transaction::new_signed_with_payer( + &[init_ix], + Some(&keypair.pubkey()), + &[keypair], + recent_blockhash, + )) + .unwrap() + } + + /// Returns this validator's [TipDistributionAccount] PDA derived from the provided epoch.
+ pub fn get_my_tip_distribution_pda(&self, epoch: Epoch) -> Pubkey { + derive_tip_distribution_account_address( + &self.tip_distribution_program_info.program_id, + &self.tip_distribution_account_config.vote_account, + epoch, + ) + .0 + } + + /// Returns whether or not the tip-payment program should be initialized. + pub fn should_initialize_tip_payment_program(&self, bank: &Bank) -> bool { + match bank.get_account(&self.tip_payment_config_pubkey()) { + None => true, + Some(account) => account.owner() != &self.tip_payment_program_info.program_id, + } + } + + /// Returns whether or not the tip-distribution program's [Config] PDA should be initialized. + pub fn should_initialize_tip_distribution_config(&self, bank: &Bank) -> bool { + match bank.get_account(&self.tip_distribution_config_pubkey()) { + None => true, + Some(account) => account.owner() != &self.tip_distribution_program_info.program_id, + } + } + + /// Returns whether or not the current [TipDistributionAccount] PDA should be initialized for this epoch. + pub fn should_init_tip_distribution_account(&self, bank: &Bank) -> bool { + let pda = derive_tip_distribution_account_address( + &self.tip_distribution_program_info.program_id, + &self.tip_distribution_account_config.vote_account, + bank.epoch(), + ) + .0; + match bank.get_account(&pda) { + None => true, + // Since anyone can derive the PDA and send it lamports we must also check the owner is the program. + Some(account) => account.owner() != &self.tip_distribution_program_info.program_id, + } + } + + /// Creates an [Initialize] transaction object. + pub fn initialize_tip_distribution_config_tx( + &self, + recent_blockhash: Hash, + kp: &Keypair, + ) -> SanitizedTransaction { + let ix = initialize_ix( + self.tip_distribution_program_info.program_id, + InitializeArgs { + authority: kp.pubkey(), + expired_funds_account: kp.pubkey(), + num_epochs_valid: 10, + max_validator_commission_bps: 10_000, + bump: self.tip_distribution_program_info.config_pda_and_bump.1, + }, + InitializeAccounts { + config: self.tip_distribution_program_info.config_pda_and_bump.0, + system_program: system_program::id(), + initializer: kp.pubkey(), + }, + ); + + SanitizedTransaction::try_from_legacy_transaction(Transaction::new_signed_with_payer( + &[ix], + Some(&kp.pubkey()), + &[kp], + recent_blockhash, + )) + .unwrap() + } + + /// Creates an [InitializeTipDistributionAccount] transaction object using the provided Epoch. 
+ pub fn initialize_tip_distribution_account_tx( + &self, + recent_blockhash: Hash, + epoch: Epoch, + keypair: &Keypair, + ) -> SanitizedTransaction { + let (tip_distribution_account, bump) = derive_tip_distribution_account_address( + &self.tip_distribution_program_info.program_id, + &self.tip_distribution_account_config.vote_account, + epoch, + ); + + let ix = initialize_tip_distribution_account_ix( + self.tip_distribution_program_info.program_id, + InitializeTipDistributionAccountArgs { + merkle_root_upload_authority: self + .tip_distribution_account_config + .merkle_root_upload_authority, + validator_commission_bps: self.tip_distribution_account_config.commission_bps, + bump, + }, + InitializeTipDistributionAccountAccounts { + config: self.tip_distribution_program_info.config_pda_and_bump.0, + tip_distribution_account, + system_program: system_program::id(), + signer: keypair.pubkey(), + validator_vote_account: self.tip_distribution_account_config.vote_account, + }, + ); + + SanitizedTransaction::try_from_legacy_transaction(Transaction::new_signed_with_payer( + &[ix], + Some(&keypair.pubkey()), + &[keypair], + recent_blockhash, + )) + .unwrap() + } + + /// Builds a transaction that changes the current tip receiver to new_tip_receiver. + /// The on-chain program will transfer tips sitting in the tip accounts to the tip receiver + /// before changing ownership. + pub fn change_tip_receiver_and_block_builder_tx( + &self, + new_tip_receiver: &Pubkey, + bank: &Bank, + keypair: &Keypair, + block_builder: &Pubkey, + block_builder_commission: u64, + ) -> Result<SanitizedTransaction> { + let config = self.get_tip_payment_config_account(bank)?; + Ok(self.build_change_tip_receiver_and_block_builder_tx( + &config.tip_receiver, + new_tip_receiver, + bank, + keypair, + &config.block_builder, + block_builder, + block_builder_commission, + )) + } + + pub fn build_change_tip_receiver_and_block_builder_tx( + &self, + old_tip_receiver: &Pubkey, + new_tip_receiver: &Pubkey, + bank: &Bank, + keypair: &Keypair, + old_block_builder: &Pubkey, + block_builder: &Pubkey, + block_builder_commission: u64, + ) -> SanitizedTransaction { + let change_tip_ix = Instruction { + program_id: self.tip_payment_program_info.program_id, + data: jito_tip_payment::instruction::ChangeTipReceiver {}.data(), + accounts: jito_tip_payment::accounts::ChangeTipReceiver { + config: self.tip_payment_program_info.config_pda_bump.0, + old_tip_receiver: *old_tip_receiver, + new_tip_receiver: *new_tip_receiver, + block_builder: *old_block_builder, + tip_payment_account_0: self.tip_payment_program_info.tip_pda_0.0, + tip_payment_account_1: self.tip_payment_program_info.tip_pda_1.0, + tip_payment_account_2: self.tip_payment_program_info.tip_pda_2.0, + tip_payment_account_3: self.tip_payment_program_info.tip_pda_3.0, + tip_payment_account_4: self.tip_payment_program_info.tip_pda_4.0, + tip_payment_account_5: self.tip_payment_program_info.tip_pda_5.0, + tip_payment_account_6: self.tip_payment_program_info.tip_pda_6.0, + tip_payment_account_7: self.tip_payment_program_info.tip_pda_7.0, + signer: keypair.pubkey(), + } + .to_account_metas(None), + }; + let change_block_builder_ix = Instruction { + program_id: self.tip_payment_program_info.program_id, + data: jito_tip_payment::instruction::ChangeBlockBuilder { + block_builder_commission, + } + .data(), + accounts: jito_tip_payment::accounts::ChangeBlockBuilder { + config: self.tip_payment_program_info.config_pda_bump.0, + tip_receiver: *new_tip_receiver, // tip receiver will have just changed in previous ix
old_block_builder: *old_block_builder, + new_block_builder: *block_builder, + tip_payment_account_0: self.tip_payment_program_info.tip_pda_0.0, + tip_payment_account_1: self.tip_payment_program_info.tip_pda_1.0, + tip_payment_account_2: self.tip_payment_program_info.tip_pda_2.0, + tip_payment_account_3: self.tip_payment_program_info.tip_pda_3.0, + tip_payment_account_4: self.tip_payment_program_info.tip_pda_4.0, + tip_payment_account_5: self.tip_payment_program_info.tip_pda_5.0, + tip_payment_account_6: self.tip_payment_program_info.tip_pda_6.0, + tip_payment_account_7: self.tip_payment_program_info.tip_pda_7.0, + signer: keypair.pubkey(), + } + .to_account_metas(None), + }; + SanitizedTransaction::try_from_legacy_transaction(Transaction::new_signed_with_payer( + &[change_tip_ix, change_block_builder_ix], + Some(&keypair.pubkey()), + &[keypair], + bank.last_blockhash(), + )) + .unwrap() + } + + /// Returns the balance of all the MEV tip accounts + pub fn get_tip_account_balances(&self, bank: &Arc<Bank>) -> Vec<(Pubkey, u64)> { + let accounts = self.get_tip_accounts(); + accounts + .into_iter() + .map(|account| { + let balance = bank.get_balance(&account); + (account, balance) + }) + .collect() + } + + /// Returns the balance of all the MEV tip accounts above the rent-exempt amount. + /// NOTE: the on-chain program has rent_exempt = force + pub fn get_tip_account_balances_above_rent_exempt( + &self, + bank: &Arc<Bank>, + ) -> Vec<(Pubkey, u64)> { + let accounts = self.get_tip_accounts(); + accounts + .into_iter() + .map(|account| { + let account_data = bank.get_account(&account).unwrap_or_default(); + let balance = bank.get_balance(&account); + let rent_exempt = + bank.get_minimum_balance_for_rent_exemption(account_data.data().len()); + // NOTE: don't unwrap here in case bug in on-chain program, don't want all validators to crash + // if program gets stuck in bad state + (account, balance.checked_sub(rent_exempt).unwrap_or_else(|| { + warn!("balance is below rent exempt amount. balance: {} rent_exempt: {} acc size: {}", balance, rent_exempt, TipPaymentAccount::SIZE); + 0 + })) + }) + .collect() + } + + /// Return a bundle that is capable of calling the initialize instructions on the two tip payment programs + /// This is mainly helpful for local development and shouldn't run on testnet and mainnet, assuming the + /// correct TipManager configuration is set.
+ pub fn get_initialize_tip_programs_bundle( + &self, + bank: &Bank, + keypair: &Keypair, + ) -> Option<SanitizedBundle> { + let maybe_init_tip_payment_config_tx = if self.should_initialize_tip_payment_program(bank) { + debug!("should_initialize_tip_payment_program=true"); + Some(self.initialize_tip_payment_program_tx(bank.last_blockhash(), keypair)) + } else { + None + }; + + let maybe_init_tip_distro_config_tx = + if self.should_initialize_tip_distribution_config(bank) { + debug!("should_initialize_tip_distribution_config=true"); + Some(self.initialize_tip_distribution_config_tx(bank.last_blockhash(), keypair)) + } else { + None + }; + + let transactions = [ + maybe_init_tip_payment_config_tx, + maybe_init_tip_distro_config_tx, + ] + .into_iter() + .flatten() + .collect::<Vec<SanitizedTransaction>>(); + + if transactions.is_empty() { + None + } else { + let bundle_id = derive_bundle_id_from_sanitized_transactions(&transactions); + Some(SanitizedBundle { + transactions, + bundle_id, + }) + } + } + + pub fn get_tip_programs_crank_bundle( + &self, + bank: &Bank, + keypair: &Keypair, + block_builder_fee_info: &BlockBuilderFeeInfo, + ) -> Result<Option<SanitizedBundle>> { + let maybe_init_tip_distro_account_tx = if self.should_init_tip_distribution_account(bank) { + debug!("should_init_tip_distribution_account=true"); + Some(self.initialize_tip_distribution_account_tx( + bank.last_blockhash(), + bank.epoch(), + keypair, + )) + } else { + None + }; + + let configured_tip_receiver = self.get_configured_tip_receiver(bank)?; + let my_tip_receiver = self.get_my_tip_distribution_pda(bank.epoch()); + let maybe_change_tip_receiver_tx = if configured_tip_receiver != my_tip_receiver { + debug!("change_tip_receiver=true"); + Some(self.change_tip_receiver_and_block_builder_tx( + &my_tip_receiver, + bank, + keypair, + &block_builder_fee_info.block_builder, + block_builder_fee_info.block_builder_commission, + )?) + } else { + None + }; + debug!( + "maybe_change_tip_receiver_tx: {:?}", + maybe_change_tip_receiver_tx + ); + + let transactions = [ + maybe_init_tip_distro_account_tx, + maybe_change_tip_receiver_tx, + ] + .into_iter() + .flatten() + .collect::<Vec<SanitizedTransaction>>(); + + if transactions.is_empty() { + Ok(None) + } else { + let bundle_id = derive_bundle_id_from_sanitized_transactions(&transactions); + Ok(Some(SanitizedBundle { + transactions, + bundle_id, + })) + } + } +}
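`commission_bps` is denominated in basis points (the tip-distribution config above caps it at `max_validator_commission_bps: 10_000`, i.e. 100%), so the validator's cut of tips routed to its [TipDistributionAccount] is plain integer math. How the actual split is enforced is up to the on-chain tip-distribution program; the following only illustrates the bps arithmetic with hypothetical numbers:

```rust
fn main() {
    // Hypothetical: 100 SOL of tips (in lamports) and an 800 bps (8%) commission.
    let tips_lamports: u64 = 100 * 1_000_000_000;
    let commission_bps: u64 = 800;

    // Basis points: 10_000 bps == 100%.
    let validator_cut = tips_lamports.saturating_mul(commission_bps) / 10_000;
    let distributed_to_stakers = tips_lamports - validator_cut;

    assert_eq!(validator_cut, 8_000_000_000); // 8 SOL
    assert_eq!(distributed_to_stakers, 92_000_000_000); // 92 SOL
}
```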
diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 028a88f416..74dd1da529 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -6,14 +6,21 @@ use { crate::{ banking_stage::BankingStage, banking_trace::{BankingTracer, TracerThread}, + bundle_stage::{bundle_account_locker::BundleAccountLocker, BundleStage}, cluster_info_vote_listener::{ ClusterInfoVoteListener, GossipDuplicateConfirmedSlotsSender, GossipVerifiedVoteHashSender, VerifiedVoteSender, VoteTracker, }, fetch_stage::FetchStage, + proxy::{ + block_engine_stage::{BlockBuilderFeeInfo, BlockEngineConfig, BlockEngineStage}, + fetch_stage_manager::FetchStageManager, + relayer_stage::{RelayerConfig, RelayerStage}, + }, sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage, staked_nodes_updater_service::StakedNodesUpdaterService, + tip_manager::{TipManager, TipManagerConfig}, tpu_entry_notifier::TpuEntryNotifier, validator::{BlockProductionMethod, GeneratorConfig}, }, @@ -31,7 +38,7 @@ use { rpc_subscriptions::RpcSubscriptions, }, solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, - solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair}, + solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair, signer::Signer}, solana_streamer::{ nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, quic::{spawn_server, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, @@ -40,9 +47,9 @@ use { solana_turbine::broadcast_stage::{BroadcastStage, BroadcastStageType}, solana_vote::vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender}, std::{ - collections::HashMap, + collections::{HashMap, HashSet}, net::{SocketAddr, UdpSocket}, - sync::{atomic::AtomicBool, Arc, RwLock}, + sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, thread, time::Duration, }, @@ -73,6 +80,10 @@ pub struct Tpu { tpu_entry_notifier: Option<TpuEntryNotifier>, staked_nodes_updater_service: StakedNodesUpdaterService, tracer_thread_hdl: TracerThread, + relayer_stage: RelayerStage, + block_engine_stage: BlockEngineStage, + fetch_stage_manager: FetchStageManager, + bundle_stage: BundleStage, } impl Tpu { @@ -111,6 +122,11 @@ impl Tpu { prioritization_fee_cache: &Arc<PrioritizationFeeCache>, block_production_method: BlockProductionMethod, _generator_config: Option<GeneratorConfig>, /* vestigial code for replay invalidator */ + block_engine_config: Arc<Mutex<BlockEngineConfig>>, + relayer_config: Arc<Mutex<RelayerConfig>>, + tip_manager_config: TipManagerConfig, + shred_receiver_address: Arc<RwLock<Option<SocketAddr>>>, + preallocated_bundle_cost: u64, ) -> Self { let TpuSockets { transactions: transactions_sockets, transactions_forwards_quic: transactions_forwards_quic_sockets, } = sockets; - let (packet_sender, packet_receiver) = unbounded(); + // Packets from fetch stage and quic server are intercepted and sent through fetch_stage_manager + // If relayer is connected, packets are dropped.
If not, packets are forwarded on to packet_sender + let (packet_intercept_sender, packet_intercept_receiver) = unbounded(); + let (vote_packet_sender, vote_packet_receiver) = unbounded(); let (forwarded_packet_sender, forwarded_packet_receiver) = unbounded(); let fetch_stage = FetchStage::new_with_sender( @@ -129,7 +148,7 @@ impl Tpu { tpu_forwards_sockets, tpu_vote_sockets, exit.clone(), - &packet_sender, + &packet_intercept_sender, &vote_packet_sender, &forwarded_packet_sender, forwarded_packet_receiver, @@ -157,7 +176,7 @@ impl Tpu { .tpu(Protocol::QUIC) .expect("Operator must spin up node with valid (QUIC) TPU address") .ip(), - packet_sender, + packet_intercept_sender, exit.clone(), MAX_QUIC_CONNECTIONS_PER_PEER, staked_nodes.clone(), @@ -188,8 +207,10 @@ impl Tpu { ) .unwrap(); + let (packet_sender, packet_receiver) = unbounded(); + let sigverify_stage = { - let verifier = TransactionSigVerifier::new(non_vote_sender); + let verifier = TransactionSigVerifier::new(non_vote_sender.clone()); SigVerifyStage::new(packet_receiver, verifier, "tpu-verifier") }; @@ -202,6 +223,41 @@ impl Tpu { let (gossip_vote_sender, gossip_vote_receiver) = banking_tracer.create_channel_gossip_vote(); + + let block_builder_fee_info = Arc::new(Mutex::new(BlockBuilderFeeInfo { + block_builder: cluster_info.keypair().pubkey(), + block_builder_commission: 0, + })); + + let (bundle_sender, bundle_receiver) = unbounded(); + let block_engine_stage = BlockEngineStage::new( + block_engine_config, + bundle_sender, + cluster_info.clone(), + packet_sender.clone(), + non_vote_sender.clone(), + exit.clone(), + &block_builder_fee_info, + ); + + let (heartbeat_tx, heartbeat_rx) = unbounded(); + let fetch_stage_manager = FetchStageManager::new( + cluster_info.clone(), + heartbeat_rx, + packet_intercept_receiver, + packet_sender.clone(), + exit.clone(), + ); + + let relayer_stage = RelayerStage::new( + relayer_config, + cluster_info.clone(), + heartbeat_tx, + packet_sender, + non_vote_sender, + exit.clone(), + ); + let cluster_info_vote_listener = ClusterInfoVoteListener::new( exit.clone(), cluster_info.clone(), @@ -218,6 +274,15 @@ impl Tpu { cluster_confirmed_slot_sender, ); + let tip_manager = TipManager::new(tip_manager_config); + + let bundle_account_locker = BundleAccountLocker::default(); + + // tip accounts can't be used in BankingStage to avoid someone from stealing tips mid-slot. 
+ // it also helps reduce surface area for potential account contention + let mut blacklisted_accounts = HashSet::new(); + blacklisted_accounts.insert(tip_manager.tip_payment_config_pubkey()); + blacklisted_accounts.extend(tip_manager.get_tip_accounts()); let banking_stage = BankingStage::new( block_production_method, cluster_info, @@ -225,10 +290,28 @@ impl Tpu { non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, + transaction_status_sender.clone(), + replay_vote_sender.clone(), + log_messages_bytes_limit, + connection_cache.clone(), + bank_forks.clone(), + prioritization_fee_cache, + blacklisted_accounts, + bundle_account_locker.clone(), + ); + + let bundle_stage = BundleStage::new( + cluster_info, + poh_recorder, + bundle_receiver, transaction_status_sender, replay_vote_sender, log_messages_bytes_limit, - connection_cache.clone(), + exit.clone(), + tip_manager, + bundle_account_locker, + &block_builder_fee_info, + preallocated_bundle_cost, bank_forks.clone(), prioritization_fee_cache, ); @@ -257,6 +340,7 @@ impl Tpu { bank_forks, shred_version, turbine_quic_endpoint_sender, + shred_receiver_address, ); Self { @@ -271,6 +355,10 @@ impl Tpu { tpu_entry_notifier, staked_nodes_updater_service, tracer_thread_hdl, + block_engine_stage, + relayer_stage, + fetch_stage_manager, + bundle_stage, } } @@ -284,6 +372,10 @@ impl Tpu { self.staked_nodes_updater_service.join(), self.tpu_quic_t.join(), self.tpu_forwards_quic_t.join(), + self.bundle_stage.join(), + self.relayer_stage.join(), + self.block_engine_stage.join(), + self.fetch_stage_manager.join(), ]; let broadcast_result = self.broadcast_stage.join(); for result in results { diff --git a/core/src/tpu_entry_notifier.rs b/core/src/tpu_entry_notifier.rs index 730a3b14fa..bc20696f5b 100644 --- a/core/src/tpu_entry_notifier.rs +++ b/core/src/tpu_entry_notifier.rs @@ -58,40 +58,54 @@ impl TpuEntryNotifier { current_slot: &mut u64, current_index: &mut usize, ) -> Result<(), RecvTimeoutError> { - let (bank, (entry, tick_height)) = entry_receiver.recv_timeout(Duration::from_secs(1))?; + let WorkingBankEntry { + bank, + entries_ticks, + } = entry_receiver.recv_timeout(Duration::from_secs(1))?; let slot = bank.slot(); - let index = if slot != *current_slot { - *current_index = 0; - *current_slot = slot; - 0 - } else { - *current_index += 1; - *current_index - }; - let entry_summary = EntrySummary { - num_hashes: entry.num_hashes, - hash: entry.hash, - num_transactions: entry.transactions.len() as u64, - }; - if let Err(err) = entry_notification_sender.send(EntryNotification { - slot, - index, - entry: entry_summary, - }) { - warn!( + let mut indices_sent = vec![]; + + entries_ticks.iter().for_each(|(entry, _)| { + let index = if slot != *current_slot { + *current_index = 0; + *current_slot = slot; + 0 + } else { + *current_index += 1; + *current_index + }; + + let entry_summary = EntrySummary { + num_hashes: entry.num_hashes, + hash: entry.hash, + num_transactions: entry.transactions.len() as u64, + }; + if let Err(err) = entry_notification_sender.send(EntryNotification { + slot, + index, + entry: entry_summary, + }) { + warn!( "Failed to send slot {slot:?} entry {index:?} from Tpu to EntryNotifierService, error {err:?}", ); - } + } + + indices_sent.push(index); + }); - if let Err(err) = broadcast_entry_sender.send((bank, (entry, tick_height))) { + if let Err(err) = broadcast_entry_sender.send(WorkingBankEntry { + bank, + entries_ticks, + }) { warn!( - "Failed to send slot {slot:?} entry {index:?} from Tpu to BroadcastStage, error {err:?}", + 
"Failed to send slot {slot:?} entries {indices_sent:?} from Tpu to BroadcastStage, error {err:?}", ); // If the BroadcastStage channel is closed, the validator has halted. Try to exit // gracefully. exit.store(true, Ordering::Relaxed); } + Ok(()) } diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 0fa619033a..09210c974b 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -139,6 +139,7 @@ impl Tvu { turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, repair_quic_endpoint_sender: AsyncSender, + shred_receiver_addr: Arc>>, ) -> Result { let TvuSockets { repair: repair_socket, @@ -187,6 +188,7 @@ impl Tvu { retransmit_receiver, max_slots.clone(), Some(rpc_subscriptions.clone()), + shred_receiver_addr, ); let cluster_slots = Arc::new(ClusterSlots::default()); @@ -492,6 +494,7 @@ pub mod tests { turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver, repair_quic_endpoint_sender, + Arc::new(RwLock::new(None)), ) .expect("assume success"); exit.store(true, Ordering::Relaxed); diff --git a/core/src/validator.rs b/core/src/validator.rs index 550c1cd1c8..58c11bce36 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -16,6 +16,7 @@ use { }, ledger_metric_report_service::LedgerMetricReportService, poh_timing_report_service::PohTimingReportService, + proxy::{block_engine_stage::BlockEngineConfig, relayer_stage::RelayerConfig}, repair::{self, serve_repair::ServeRepair, serve_repair_service::ServeRepairService}, rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService}, sample_performance_service::SamplePerformanceService, @@ -25,6 +26,7 @@ use { system_monitor_service::{ verify_net_stats_access, SystemMonitorService, SystemMonitorStatsReportConfig, }, + tip_manager::TipManagerConfig, tpu::{Tpu, TpuSockets, DEFAULT_TPU_COALESCE}, tvu::{Tvu, TvuConfig, TvuSockets}, }, @@ -104,6 +106,10 @@ use { self, clean_orphaned_account_snapshot_dirs, move_and_async_delete_path_contents, }, }, + solana_runtime_plugin::{ + runtime_plugin_admin_rpc_service::RuntimePluginManagerRpcRequest, + runtime_plugin_service::RuntimePluginService, + }, solana_sdk::{ clock::Slot, epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, @@ -125,7 +131,7 @@ use { path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, RwLock, + Arc, Mutex, RwLock, }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -260,6 +266,12 @@ pub struct ValidatorConfig { pub block_production_method: BlockProductionMethod, pub generator_config: Option, pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, + pub relayer_config: Arc>, + pub block_engine_config: Arc>, + // Using Option inside RwLock is ugly, but only convenient way to allow toggle on/off + pub shred_receiver_address: Arc>>, + pub tip_manager_config: TipManagerConfig, + pub preallocated_bundle_cost: u64, } impl Default for ValidatorConfig { @@ -328,6 +340,11 @@ impl Default for ValidatorConfig { block_production_method: BlockProductionMethod::default(), generator_config: None, use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(), + relayer_config: Arc::new(Mutex::new(RelayerConfig::default())), + block_engine_config: Arc::new(Mutex::new(BlockEngineConfig::default())), + shred_receiver_address: Arc::new(RwLock::new(None)), + tip_manager_config: TipManagerConfig::default(), + preallocated_bundle_cost: u64::default(), } } } @@ -491,6 +508,10 @@ impl Validator { tpu_connection_pool_size: usize, tpu_enable_udp: 
bool, admin_rpc_service_post_init: Arc<RwLock<Option<AdminRpcRequestMetadataPostInit>>>, + runtime_plugin_configs_and_request_rx: Option<( + Vec<PathBuf>, + Receiver<RuntimePluginManagerRpcRequest>, + )>, ) -> Result<Self, String> { let id = identity_keypair.pubkey(); assert_eq!(&id, node.info.pubkey()); @@ -875,6 +896,17 @@ impl Validator { None, )); + if let Some((runtime_plugin_configs, request_rx)) = runtime_plugin_configs_and_request_rx { + RuntimePluginService::start( + &runtime_plugin_configs, + request_rx, + bank_forks.clone(), + block_commitment_cache.clone(), + exit.clone(), + ) + .map_err(|e| format!("Failed to start runtime plugin service: {e:?}"))?; + } + let max_slots = Arc::new(MaxSlots::default()); let (completed_data_sets_sender, completed_data_sets_receiver) = bounded(MAX_COMPLETED_DATA_SETS_IN_CHANNEL); @@ -1081,6 +1113,9 @@ impl Validator { cluster_info: cluster_info.clone(), vote_account: *vote_account, repair_whitelist: config.repair_whitelist.clone(), + block_engine_config: config.block_engine_config.clone(), + relayer_config: config.relayer_config.clone(), + shred_receiver_address: config.shred_receiver_address.clone(), }); let waited_for_supermajority = wait_for_supermajority( @@ -1275,6 +1310,7 @@ impl Validator { turbine_quic_endpoint_sender.clone(), turbine_quic_endpoint_receiver, repair_quic_endpoint_sender, + config.shred_receiver_address.clone(), )?; let tpu = Tpu::new( @@ -1318,6 +1354,11 @@ impl Validator { &prioritization_fee_cache, config.block_production_method.clone(), config.generator_config.clone(), + config.block_engine_config.clone(), + config.relayer_config.clone(), + config.tip_manager_config.clone(), + config.shred_receiver_address.clone(), + config.preallocated_bundle_cost, ); let cluster_type = bank_forks.read().unwrap().root_bank().cluster_type(); @@ -1793,6 +1834,7 @@ fn load_blockstore( .map(|service| service.sender()), accounts_update_notifier, exit, + true, ); // Before replay starts, set the callbacks in each of the banks in BankForks so that @@ -2484,6 +2526,7 @@ mod tests { DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, Arc::new(RwLock::new(None)), + None, ) .expect("assume successful validator start"); assert_eq!( @@ -2561,7 +2604,7 @@ mod tests { Arc::new(RwLock::new(vec![Arc::new(vote_account_keypair)])), vec![LegacyContactInfo::try_from(&leader_node.info).unwrap()], &config, - true, // should_check_duplicate_instance.
+ true, // should_check_duplicate_instance None, // rpc_to_plugin_manager_receiver Arc::new(RwLock::new(ValidatorStartProgress::default())), SocketAddrSpace::Unspecified, @@ -2569,6 +2612,7 @@ mod tests { DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, Arc::new(RwLock::new(None)), + None, ) .expect("assume successful validator start") }) diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 5cc507b155..6ce6563bed 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -444,6 +444,7 @@ fn test_snapshots_have_expected_epoch_accounts_hash() { if let Some(full_snapshot_archive_info) = snapshot_utils::get_highest_full_snapshot_archive_info( &snapshot_config.full_snapshot_archives_dir, + None, ) { if full_snapshot_archive_info.slot() == bank.slot() { @@ -563,6 +564,7 @@ fn test_background_services_request_handling_for_epoch_accounts_hash() { info!("Taking full snapshot..."); while snapshot_utils::get_highest_full_snapshot_archive_slot( &snapshot_config.full_snapshot_archives_dir, + None, ) != Some(bank.slot()) { trace!("waiting for full snapshot..."); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 9a862138a2..2980981536 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -557,6 +557,7 @@ fn test_concurrent_snapshot_packaging( // Wait until the package has been archived by SnapshotPackagerService while snapshot_utils::get_highest_full_snapshot_archive_slot( &full_snapshot_archives_dir, + None, ) .is_none() { @@ -1093,6 +1094,7 @@ fn test_snapshots_with_background_services( &snapshot_test_config .snapshot_config .full_snapshot_archives_dir, + None, ) != Some(slot) { assert!( @@ -1111,6 +1113,7 @@ fn test_snapshots_with_background_services( .snapshot_config .incremental_snapshot_archives_dir, last_full_snapshot_slot.unwrap(), + None, ) != Some(slot) { assert!( diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 0841849bf1..c2dd74598e 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -108,6 +108,10 @@ impl CostTracker { self.vote_cost_limit = vote_cost_limit; } + pub fn set_block_cost_limit(&mut self, new_limit: u64) { + self.block_cost_limit = new_limit; + } + pub fn try_add(&mut self, tx_cost: &TransactionCost) -> Result { self.would_fit(tx_cost)?; self.add_transaction_cost(tx_cost); @@ -145,6 +149,10 @@ impl CostTracker { self.block_cost } + pub fn block_cost_limit(&self) -> u64 { + self.block_cost_limit + } + pub fn transaction_count(&self) -> u64 { self.transaction_count } diff --git a/deploy_programs b/deploy_programs new file mode 100755 index 0000000000..cbdf837e92 --- /dev/null +++ b/deploy_programs @@ -0,0 +1,17 @@ +#!/usr/bin/env sh +# Deploys the tip payment and tip distribution programs on a local validator at predetermined addresses +set -eux + +WALLET_LOCATION=~/.config/solana/id.json + +# build this solana binary to ensure we're using a version compatible with the validator +cargo b --release --bin solana + +./target/release/solana airdrop -ul 1000 $WALLET_LOCATION + +(cd jito-programs/tip-payment && anchor build) +
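+# (Editorial aside, a hedged sketch rather than part of the original script: the
+# local validator can be sanity-checked with the same binary before deploying,
+# and the resulting accounts inspected afterwards; <PROGRAM_ID> is a placeholder.)
+#   ./target/release/solana -ul cluster-version
+#   ./target/release/solana -ul account <PROGRAM_ID>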
+# NOTE: make sure the declare_id! is set correctly in the programs +# Also, || true makes sure that if the deploy fails the first time around, tip_payment can still be deployed RUST_INFO=trace ./target/release/solana deploy --keypair $WALLET_LOCATION -ul ./jito-programs/tip-payment/target/deploy/tip_distribution.so ./jito-programs/tip-payment/dev/dev_tip_distribution.json || true +RUST_INFO=trace ./target/release/solana deploy --keypair $WALLET_LOCATION -ul ./jito-programs/tip-payment/target/deploy/tip_payment.so ./jito-programs/tip-payment/dev/dev_tip_payment.json diff --git a/dev/Dockerfile b/dev/Dockerfile new file mode 100644 index 0000000000..bab9a1c02f --- /dev/null +++ b/dev/Dockerfile @@ -0,0 +1,48 @@ +FROM rust:1.64-slim-bullseye as builder + +# Add Google Protocol Buffers for Libra's metrics library. +ENV PROTOC_VERSION 3.8.0 +ENV PROTOC_ZIP protoc-$PROTOC_VERSION-linux-x86_64.zip + +RUN set -x \ + && apt update \ + && apt install -y \ + clang \ + cmake \ + libudev-dev \ + make \ + unzip \ + libssl-dev \ + pkg-config \ + zlib1g-dev \ + curl \ + && rustup component add rustfmt \ + && rustup component add clippy \ + && rustc --version \ + && cargo --version \ + && curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \ + && unzip -o $PROTOC_ZIP -d /usr/local bin/protoc \ + && unzip -o $PROTOC_ZIP -d /usr/local include/* \ + && rm -f $PROTOC_ZIP + + +WORKDIR /solana COPY . . RUN mkdir -p docker-output + +ARG ci_commit # NOTE: Keep this here before build since variable is referenced during CI build step. +ENV CI_COMMIT=$ci_commit + +ARG debug + +# Uses docker buildkit to cache the image. +# /usr/local/cargo/git needed for crossbeam patch +RUN --mount=type=cache,mode=0777,target=/solana/target \ --mount=type=cache,mode=0777,target=/usr/local/cargo/registry \ --mount=type=cache,mode=0777,target=/usr/local/cargo/git \ if [ "$debug" = "false" ] ; then \ ./cargo stable build --release && cp target/release/solana* ./docker-output; \ else \ RUSTFLAGS='-g -C force-frame-pointers=yes' ./cargo stable build --release && cp target/release/solana* ./docker-output; \ fi diff --git a/docs/src/cli/install-solana-cli-tools.md b/docs/src/cli/install-solana-cli-tools.md index 26a4ede5af..5c57115aff 100644 --- a/docs/src/cli/install-solana-cli-tools.md +++ b/docs/src/cli/install-solana-cli-tools.md @@ -17,11 +17,11 @@ depending on your preferred workflow: - Open your favorite Terminal application - Install the Solana release - [LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) on your - machine by running: + [LATEST_SOLANA_RELEASE_VERSION](https://github.com/jito-foundation/jito-solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) + on your machine by running: ```bash -sh -c "$(curl -sSfL https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/install)" +sh -c "$(curl -sSfL https://release.jito.wtf/LATEST_SOLANA_RELEASE_VERSION/install)" ``` - You can replace `LATEST_SOLANA_RELEASE_VERSION` with the release tag matching @@ -35,7 +35,7 @@ downloading LATEST_SOLANA_RELEASE_VERSION installer Configuration: /home/solana/.config/solana/install/config.yml Active release directory: /home/solana/.local/share/solana/install/active_release * Release version: LATEST_SOLANA_RELEASE_VERSION -* Release URL: https://github.com/solana-labs/solana/releases/download/LATEST_SOLANA_RELEASE_VERSION/solana-release-x86_64-unknown-linux-gnu.tar.bz2 +* Release URL:
https://github.com/jito-foundation/jito-solana/releases/download/LATEST_SOLANA_RELEASE_VERSION/solana-release-x86_64-unknown-linux-gnu.tar.bz2 Update successful ``` @@ -72,7 +72,7 @@ solana --version installer into a temporary directory: ```bash -cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-install-init-x86_64-pc-windows-msvc.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs" +cmd /c "curl https://release.jito.wtf/LATEST_SOLANA_RELEASE_VERSION/solana-install-init-x86_64-pc-windows-msvc.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs" ``` - Copy and paste the following command, then press Enter to install the latest @@ -106,7 +106,7 @@ manually download and install the binaries. ### Linux Download the binaries by navigating to -[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), +[https://github.com/jito-foundation/jito-solana/releases/latest](https://github.com/jito-foundation/jito-solana/releases/latest), download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the archive: @@ -119,7 +119,7 @@ export PATH=$PWD/bin:$PATH ### MacOS Download the binaries by navigating to -[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), +[https://github.com/jito-foundation/jito-solana/releases/latest](https://github.com/jito-foundation/jito-solana/releases/latest), download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the archive: @@ -132,7 +132,7 @@ export PATH=$PWD/bin:$PATH ### Windows - Download the binaries by navigating to - [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), + [https://github.com/jito-foundation/jito-solana/releases/latest](https://github.com/jito-foundation/jito-solana/releases/latest), download **solana-release-x86_64-pc-windows-msvc.tar.bz2**, then extract the archive using WinZip or similar. @@ -148,7 +148,7 @@ set PATH=%cd%/bin;%PATH% If you are unable to use the prebuilt binaries or prefer to build it yourself from source, navigate to -[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), +[https://github.com/jito-foundation/jito-solana/releases/latest](https://github.com/jito-foundation/jito-solana/releases/latest), and download the **Source Code** archive. 
Extract the code and build the binaries with: diff --git a/docs/src/cluster/bench-tps.md b/docs/src/cluster/bench-tps.md index d913f9e5f1..09af5ca3a1 100644 --- a/docs/src/cluster/bench-tps.md +++ b/docs/src/cluster/bench-tps.md @@ -6,7 +6,7 @@ The Solana git repository contains all the scripts you might need to spin up you For all four variations, you'd need the latest Rust toolchain and the Solana source code: -First, setup Rust, Cargo and system packages as described in the Solana [README](https://github.com/solana-labs/solana#1-install-rustc-cargo-and-rustfmt) +First, set up Rust, Cargo and system packages as described in the Solana [README](https://github.com/jito-foundation/jito-solana#1-install-rustc-cargo-and-rustfmt) Now checkout the code from github: diff --git a/entry/src/entry.rs b/entry/src/entry.rs index af3fdca951..aa23d02b75 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -231,7 +231,7 @@ pub fn hash_transactions(transactions: &[VersionedTransaction]) -> Hash { .iter() .flat_map(|tx| tx.signatures.iter()) .collect(); - let merkle_tree = MerkleTree::new(&signatures); + let merkle_tree = MerkleTree::new(&signatures, false); if let Some(root_hash) = merkle_tree.get_root() { *root_hash } else { diff --git a/entry/src/poh.rs b/entry/src/poh.rs index 31dd1abbb6..d54a81222c 100644 --- a/entry/src/poh.rs +++ b/entry/src/poh.rs @@ -72,19 +72,30 @@ impl Poh { } pub fn record(&mut self, mixin: Hash) -> Option { - if self.remaining_hashes == 1 { + let entries = self.record_bundle(&[mixin]); + entries.unwrap_or_default().pop() + } + + pub fn record_bundle(&mut self, mixins: &[Hash]) -> Option> { + if self.remaining_hashes <= mixins.len() as u64 { return None; // Caller needs to `tick()` first } - self.hash = hashv(&[self.hash.as_ref(), mixin.as_ref()]); - let num_hashes = self.num_hashes + 1; - self.num_hashes = 0; - self.remaining_hashes -= 1; + let entries = mixins + .iter() + .map(|m| { + self.hash = hashv(&[self.hash.as_ref(), m.as_ref()]); + let num_hashes = self.num_hashes + 1; + self.num_hashes = 0; + self.remaining_hashes -= 1; + PohEntry { + num_hashes, + hash: self.hash, + } + }) + .collect(); - Some(PohEntry { - num_hashes, - hash: self.hash, - }) + Some(entries) } pub fn tick(&mut self) -> Option { diff --git a/f b/f new file mode 100755 index 0000000000..e5fe635508 --- /dev/null +++ b/f @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Builds jito-solana in a docker container. +# Useful for running on machines that might not have cargo installed but can run docker (Flatcar Linux). +# run `./f true` to compile with debug flags + +set -eux + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" + +GIT_SHA="$(git rev-parse --short HEAD)" + +echo "Git hash: $GIT_SHA" + +DEBUG_FLAGS=${1-false} + +DOCKER_BUILDKIT=1 docker build \ + --build-arg debug=$DEBUG_FLAGS \ + --build-arg ci_commit=$GIT_SHA \ + -t jitolabs/build-solana \ + -f dev/Dockerfile . \ + --progress=plain + +# Creates a temporary container, copies solana-validator built inside container there and +# removes the temporary container.
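+# (Illustrative usage, not part of the original script: a plain `./f` produces a
+# release build, while `./f true` compiles with the debug flags noted above; in
+# either case the binaries land in ./docker-output once the copy below finishes.)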
+docker rm temp || true +docker container create --name temp jitolabs/build-solana +mkdir -p $SCRIPT_DIR/docker-output +# Outputs the solana-validator binary to $SOLANA/docker-output/solana-validator +docker container cp temp:/solana/docker-output $SCRIPT_DIR/ +docker rm temp diff --git a/fetch-spl.sh b/fetch-spl.sh index bb8e84ebb2..35bcefa2f8 100755 --- a/fetch-spl.sh +++ b/fetch-spl.sh @@ -13,8 +13,24 @@ fetch_program() { declare version=$2 declare address=$3 declare loader=$4 + declare repo=$5 - declare so=spl_$name-$version.so + case $repo in + "jito") + so=$name-$version.so + so_name="$name.so" + url="https://github.com/jito-foundation/jito-programs/releases/download/v$version/$so_name" + ;; + "solana") + so=spl_$name-$version.so + so_name="spl_${name//-/_}.so" + url="https://github.com/solana-labs/solana-program-library/releases/download/$name-v$version/$so_name" + ;; + *) + echo "Unsupported repo: $repo" + return 1 + ;; + esac if [[ $loader == "$upgradeableLoader" ]]; then genesis_args+=(--upgradeable-program "$address" "$loader" "$so" none) @@ -30,12 +46,11 @@ fetch_program() { cp ~/.cache/solana-spl/"$so" "$so" else echo "Downloading $name $version" - so_name="spl_${name//-/_}.so" ( set -x curl -L --retry 5 --retry-delay 2 --retry-connrefused \ -o "$so" \ - "https://github.com/solana-labs/solana-program-library/releases/download/$name-v$version/$so_name" + "$url" ) mkdir -p ~/.cache/solana-spl @@ -44,19 +59,25 @@ fetch_program() { } -fetch_program token 3.5.0 TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA BPFLoader2111111111111111111111111111111111 -fetch_program token-2022 0.9.0 TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb BPFLoaderUpgradeab1e11111111111111111111111 -fetch_program memo 1.0.0 Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo BPFLoader1111111111111111111111111111111111 -fetch_program memo 3.0.0 MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr BPFLoader2111111111111111111111111111111111 -fetch_program associated-token-account 1.1.2 ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL BPFLoader2111111111111111111111111111111111 -fetch_program feature-proposal 1.0.0 Feat1YXHhH6t1juaWF74WLcfv4XoNocjXA6sPWHNgAse BPFLoader2111111111111111111111111111111111 +fetch_program token 3.5.0 TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA BPFLoader2111111111111111111111111111111111 solana +fetch_program token-2022 0.6.0 TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb BPFLoaderUpgradeab1e11111111111111111111111 solana +fetch_program memo 1.0.0 Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo BPFLoader1111111111111111111111111111111111 solana +fetch_program memo 3.0.0 MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr BPFLoader2111111111111111111111111111111111 solana +fetch_program associated-token-account 1.1.2 ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL BPFLoader2111111111111111111111111111111111 solana +fetch_program feature-proposal 1.0.0 Feat1YXHhH6t1juaWF74WLcfv4XoNocjXA6sPWHNgAse BPFLoader2111111111111111111111111111111111 solana +# jito programs +fetch_program jito_tip_payment 0.1.4 T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt BPFLoaderUpgradeab1e11111111111111111111111 jito +fetch_program jito_tip_distribution 0.1.4 4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7 BPFLoaderUpgradeab1e11111111111111111111111 jito -echo "${genesis_args[@]}" > spl-genesis-args.sh +echo "${genesis_args[@]}" >spl-genesis-args.sh echo echo "Available SPL programs:" ls -l spl_*.so +echo "Available Jito programs:" +ls -l jito*.so + echo echo "solana-genesis command-line arguments (spl-genesis-args.sh):" cat spl-genesis-args.sh diff --git 
a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 4e4193a19f..32e7be3a74 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -541,6 +541,10 @@ impl ClusterInfo { *self.entrypoints.write().unwrap() = entrypoints; } + pub fn set_my_contact_info(&self, my_contact_info: ContactInfo) { + *self.my_contact_info.write().unwrap() = my_contact_info; + } + pub fn save_contact_info(&self) { let nodes = { let entrypoint_gossip_addrs = self diff --git a/install/solana-install-init.sh b/install/solana-install-init.sh index db36dc61e2..0c228a8b59 100755 --- a/install/solana-install-init.sh +++ b/install/solana-install-init.sh @@ -16,9 +16,9 @@ { # this ensures the entire script is downloaded # if [ -z "$SOLANA_DOWNLOAD_ROOT" ]; then - SOLANA_DOWNLOAD_ROOT="https://github.com/solana-labs/solana/releases/download/" + SOLANA_DOWNLOAD_ROOT="https://github.com/jito-foundation/jito-solana/releases/download/" fi -GH_LATEST_RELEASE="https://api.github.com/repos/solana-labs/solana/releases/latest" +GH_LATEST_RELEASE="https://api.github.com/repos/jito-foundation/jito-solana/releases/latest" set -e diff --git a/install/src/command.rs b/install/src/command.rs index ac53f5fe2b..9870a27f7e 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -501,7 +501,6 @@ fn add_to_path(new_path: &str) -> bool { fn append_file(dest: &Path, line: &str) -> io::Result<()> { use std::io::Write; let mut dest_file = fs::OpenOptions::new() - .write(true) .append(true) .create(true) .open(dest)?; @@ -573,7 +572,7 @@ pub fn init( fn github_release_download_url(release_semver: &str) -> String { format!( - "https://github.com/solana-labs/solana/releases/download/v{}/solana-release-{}.tar.bz2", + "https://github.com/jito-foundation/jito-solana/releases/download/v{}/solana-release-{}.tar.bz2", release_semver, crate::build_env::TARGET ) @@ -581,7 +580,7 @@ fn github_release_download_url(release_semver: &str) -> String { fn release_channel_download_url(release_channel: &str) -> String { format!( - "https://release.solana.com/{}/solana-release-{}.tar.bz2", + "https://release.jito.wtf/{}/solana-release-{}.tar.bz2", release_channel, crate::build_env::TARGET ) @@ -589,7 +588,7 @@ fn release_channel_download_url(release_channel: &str) -> String { fn release_channel_version_url(release_channel: &str) -> String { format!( - "https://release.solana.com/{}/solana-release-{}.yml", + "https://release.jito.wtf/{}/solana-release-{}.yml", release_channel, crate::build_env::TARGET ) @@ -906,7 +905,7 @@ fn check_for_newer_github_release( while page == 1 || releases.len() == PER_PAGE { let url = reqwest::Url::parse_with_params( - "https://api.github.com/repos/solana-labs/solana/releases", + "https://api.github.com/repos/jito-foundation/jito-solana/releases", &[ ("per_page", &format!("{PER_PAGE}")), ("page", &format!("{page}")), @@ -968,58 +967,62 @@ pub fn update(config_file: &str, check_only: bool) -> Result { pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Result { let mut config = Config::load(config_file)?; - let semver_update_type = if is_init { - SemverUpdateType::Fixed - } else { - SemverUpdateType::Patch - }; - let (updated_version, download_url_and_sha256, release_dir) = if let Some(explicit_release) = &config.explicit_release { match explicit_release { ExplicitRelease::Semver(current_release_semver) => { - let progress_bar = new_spinner_progress_bar(); - progress_bar.set_message(format!("{LOOKING_GLASS}Checking for updates...")); - - let github_release = 
check_for_newer_github_release( - current_release_semver, - semver_update_type, - is_init, - )?; - - progress_bar.finish_and_clear(); + let release_dir = config.release_dir(current_release_semver); + if is_init && release_dir.exists() { + (current_release_semver.to_owned(), None, release_dir) + } else { + let progress_bar = new_spinner_progress_bar(); + progress_bar.set_message(format!("{LOOKING_GLASS}Checking for updates...")); - match github_release { - None => { - return Err(format!("Unknown release: {current_release_semver}")); - } - Some(release_semver) => { - if release_semver == *current_release_semver { - if let Ok(active_release_version) = load_release_version( - &config.active_release_dir().join("version.yml"), - ) { - if format!("v{current_release_semver}") - == active_release_version.channel - { - println!( + let semver_update_type = if is_init { + SemverUpdateType::Fixed + } else { + SemverUpdateType::Patch + }; + let github_release = check_for_newer_github_release( + current_release_semver, + semver_update_type, + is_init, + )?; + + progress_bar.finish_and_clear(); + + match github_release { + None => { + return Err(format!("Unknown release: {current_release_semver}")); + } + Some(release_semver) => { + if release_semver == *current_release_semver { + if let Ok(active_release_version) = load_release_version( + &config.active_release_dir().join("version.yml"), + ) { + if format!("v{current_release_semver}") + == active_release_version.channel + { + println!( "Install is up to date. {release_semver} is the latest compatible release" ); - return Ok(false); + return Ok(false); + } } } + config.explicit_release = + Some(ExplicitRelease::Semver(release_semver.clone())); + + let release_dir = config.release_dir(&release_semver); + let download_url_and_sha256 = if release_dir.exists() { + // Release already present in the cache + None + } else { + Some((github_release_download_url(&release_semver), None)) + }; + (release_semver, download_url_and_sha256, release_dir) } - config.explicit_release = - Some(ExplicitRelease::Semver(release_semver.clone())); - - let release_dir = config.release_dir(&release_semver); - let download_url_and_sha256 = if release_dir.exists() { - // Release already present in the cache - None - } else { - Some((github_release_download_url(&release_semver), None)) - }; - (release_semver, download_url_and_sha256, release_dir) } } } diff --git a/jito-programs b/jito-programs new file mode 160000 index 0000000000..4a23d81bd6 --- /dev/null +++ b/jito-programs @@ -0,0 +1 @@ +Subproject commit 4a23d81bd621084d3b8596b35e0457b954a74f98 diff --git a/jito-protos/Cargo.toml b/jito-protos/Cargo.toml new file mode 100644 index 0000000000..f9f0b5baa3 --- /dev/null +++ b/jito-protos/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "jito-protos" +version = { workspace = true } +edition = { workspace = true } +publish = false + +[dependencies] +bytes = { workspace = true } +prost = { workspace = true } +prost-types = { workspace = true } +tonic = { workspace = true } + +[build-dependencies] +tonic-build = { workspace = true } + +# windows users should install the protobuf compiler manually and set the PROTOC +# envar to point to the installed binary +[target."cfg(not(windows))".build-dependencies] +protobuf-src = { workspace = true } diff --git a/jito-protos/build.rs b/jito-protos/build.rs new file mode 100644 index 0000000000..30ece1620a --- /dev/null +++ b/jito-protos/build.rs @@ -0,0 +1,38 @@ +use tonic_build::configure; + +fn main() -> Result<(), std::io::Error> { + const 
PROTOC_ENVAR: &str = "PROTOC"; + if std::env::var(PROTOC_ENVAR).is_err() { + #[cfg(not(windows))] + std::env::set_var(PROTOC_ENVAR, protobuf_src::protoc()); + } + + let proto_base_path = std::path::PathBuf::from("protos"); + let proto_files = [ + "auth.proto", + "block_engine.proto", + "bundle.proto", + "packet.proto", + "relayer.proto", + "shared.proto", + ]; + let mut protos = Vec::new(); + for proto_file in &proto_files { + let proto = proto_base_path.join(proto_file); + println!("cargo:rerun-if-changed={}", proto.display()); + protos.push(proto); + } + + configure() + .build_client(true) + .build_server(false) + .type_attribute( + "TransactionErrorType", + "#[cfg_attr(test, derive(enum_iterator::Sequence))]", + ) + .type_attribute( + "InstructionErrorType", + "#[cfg_attr(test, derive(enum_iterator::Sequence))]", + ) + .compile(&protos, &[proto_base_path]) +} diff --git a/jito-protos/protos b/jito-protos/protos new file mode 160000 index 0000000000..05d210980f --- /dev/null +++ b/jito-protos/protos @@ -0,0 +1 @@ +Subproject commit 05d210980f34a7c974d7ed1a4dbcb2ce1fca00b3 diff --git a/jito-protos/src/lib.rs b/jito-protos/src/lib.rs new file mode 100644 index 0000000000..cf630c53d2 --- /dev/null +++ b/jito-protos/src/lib.rs @@ -0,0 +1,25 @@ +pub mod proto { + pub mod auth { + tonic::include_proto!("auth"); + } + + pub mod block_engine { + tonic::include_proto!("block_engine"); + } + + pub mod bundle { + tonic::include_proto!("bundle"); + } + + pub mod packet { + tonic::include_proto!("packet"); + } + + pub mod relayer { + tonic::include_proto!("relayer"); + } + + pub mod shared { + tonic::include_proto!("shared"); + } +} diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 6514312bc5..6932e89024 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -71,6 +71,7 @@ pub fn load_and_process_ledger( process_options: ProcessOptions, snapshot_archive_path: Option, incremental_snapshot_archive_path: Option, + ignore_halt_at_slot_for_snapshot_loading: bool, ) -> Result<(Arc>, Option), BlockstoreProcessorError> { let bank_snapshots_dir = if blockstore.is_primary_access() { blockstore.ledger_path().join("snapshot") @@ -81,6 +82,12 @@ pub fn load_and_process_ledger( .join("snapshot") }; + let snapshot_halt_at_slot = if ignore_halt_at_slot_for_snapshot_loading { + None + } else { + process_options.halt_at_slot + }; + let mut starting_slot = 0; // default start check with genesis let snapshot_config = if arg_matches.is_present("no_snapshot") { None @@ -89,13 +96,15 @@ pub fn load_and_process_ledger( snapshot_archive_path.unwrap_or_else(|| blockstore.ledger_path().to_path_buf()); let incremental_snapshot_archives_dir = incremental_snapshot_archive_path.unwrap_or_else(|| full_snapshot_archives_dir.clone()); - if let Some(full_snapshot_slot) = - snapshot_utils::get_highest_full_snapshot_archive_slot(&full_snapshot_archives_dir) - { + if let Some(full_snapshot_slot) = snapshot_utils::get_highest_full_snapshot_archive_slot( + &full_snapshot_archives_dir, + snapshot_halt_at_slot, + ) { let incremental_snapshot_slot = snapshot_utils::get_highest_incremental_snapshot_archive_slot( &incremental_snapshot_archives_dir, full_snapshot_slot, + snapshot_halt_at_slot, ) .unwrap_or_default(); starting_slot = std::cmp::max(full_snapshot_slot, incremental_snapshot_slot); @@ -245,6 +254,7 @@ pub fn load_and_process_ledger( None, // Maybe support this later, though accounts_update_notifier, exit.clone(), + ignore_halt_at_slot_for_snapshot_loading, ); let 
block_verification_method = value_t!( arg_matches, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index aeed15fc9c..47b6a5e152 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -2344,6 +2344,7 @@ fn main() { process_options, snapshot_archive_path, incremental_snapshot_archive_path, + true, ) { Ok((bank_forks, ..)) => { println!( @@ -2430,6 +2431,7 @@ fn main() { process_options, snapshot_archive_path, incremental_snapshot_archive_path, + true, ) { Ok((bank_forks, ..)) => { println!("{}", &bank_forks.read().unwrap().working_bank().hash()); @@ -2682,6 +2684,7 @@ fn main() { process_options, snapshot_archive_path, incremental_snapshot_archive_path, + true, ) .unwrap_or_else(|err| { eprintln!("Ledger verification failed: {err:?}"); @@ -2740,6 +2743,7 @@ fn main() { process_options, snapshot_archive_path, incremental_snapshot_archive_path, + true, ) { Ok((bank_forks, ..)) => { let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config); @@ -2888,6 +2892,21 @@ fn main() { } process_options.halt_at_slot = Some(snapshot_slot); + if let Ok(metas) = blockstore.slot_meta_iterator(0) { + let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); + if slots.is_empty() { + eprintln!("Ledger is empty, can't create snapshot"); + exit(1); + } else { + let first = slots.first().unwrap(); + let last = slots.last().unwrap_or(first); + if first > &snapshot_slot || &snapshot_slot > last { + eprintln!("Slot {} is out of bounds of ledger [{}, {}], cannot create snapshot", &snapshot_slot, first, last); + exit(1); + } + } + } + let ending_slot = if is_minimized { let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); if ending_slot <= snapshot_slot { @@ -2924,6 +2943,7 @@ fn main() { process_options, snapshot_archive_path, incremental_snapshot_archive_path, + false, // want to load snapshots <= halt_at_slot ) { Ok((bank_forks, starting_snapshot_hashes)) => { let mut bank = bank_forks @@ -3295,6 +3315,7 @@ fn main() { process_options, snapshot_archive_path, incremental_snapshot_archive_path, + true, ) .unwrap_or_else(|err| { eprintln!("Failed to load ledger: {err:?}"); @@ -3389,6 +3410,7 @@ fn main() { process_options, snapshot_archive_path, incremental_snapshot_archive_path, + true, ) { Ok((bank_forks, ..)) => { let bank_forks = bank_forks.read().unwrap(); diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index ba86b1de99..5cf91065f7 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -127,6 +127,7 @@ fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc, accounts_update_notifier: Option, exit: Arc, + ignore_halt_at_slot_for_snapshot_loading: bool, ) -> ( Arc>, LeaderScheduleCache, @@ -102,6 +104,8 @@ pub fn load_bank_forks( ) { fn get_snapshots_to_load( snapshot_config: Option<&SnapshotConfig>, + halt_at_slot: Option, + ignore_halt_at_slot_for_snapshot_loading: bool, ) -> Option<( FullSnapshotArchiveInfo, Option, @@ -111,9 +115,16 @@ pub fn load_bank_forks( return None; }; + let halt_at_slot = if ignore_halt_at_slot_for_snapshot_loading { + None + } else { + halt_at_slot + }; + let Some(full_snapshot_archive_info) = snapshot_utils::get_highest_full_snapshot_archive_info( &snapshot_config.full_snapshot_archives_dir, + halt_at_slot, ) else { warn!( @@ -127,6 +138,7 @@ pub fn load_bank_forks( snapshot_utils::get_highest_incremental_snapshot_archive_info( &snapshot_config.incremental_snapshot_archives_dir, full_snapshot_archive_info.slot(), + halt_at_slot, ); Some(( @@ -137,7 +149,11 @@ pub fn 
load_bank_forks( let (bank_forks, starting_snapshot_hashes) = if let Some((full_snapshot_archive_info, incremental_snapshot_archive_info)) = - get_snapshots_to_load(snapshot_config) + get_snapshots_to_load( + snapshot_config, + process_options.halt_at_slot, + ignore_halt_at_slot_for_snapshot_loading, + ) { // SAFETY: Having snapshots to load ensures a snapshot config let snapshot_config = snapshot_config.unwrap(); @@ -206,7 +222,7 @@ pub fn load_bank_forks( } #[allow(clippy::too_many_arguments)] -fn bank_forks_from_snapshot( +pub fn bank_forks_from_snapshot( full_snapshot_archive_info: FullSnapshotArchiveInfo, incremental_snapshot_archive_info: Option, genesis_config: &GenesisConfig, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index ff30c87b7e..1d3a192d3a 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -151,7 +151,7 @@ fn execute_batch( let mut mint_decimals: HashMap = HashMap::new(); let pre_token_balances = if record_token_balances { - collect_token_balances(bank, batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals, None) } else { vec![] }; @@ -189,7 +189,7 @@ fn execute_batch( if let Some(transaction_status_sender) = transaction_status_sender { let transactions = batch.sanitized_transactions().to_vec(); let post_token_balances = if record_token_balances { - collect_token_balances(bank, batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals, None) } else { vec![] }; @@ -671,6 +671,7 @@ pub fn test_process_blockstore( None, None, exit, + true, ); process_blockstore_from_root( diff --git a/ledger/src/token_balances.rs b/ledger/src/token_balances.rs index 204bd43359..8926fb9a04 100644 --- a/ledger/src/token_balances.rs +++ b/ledger/src/token_balances.rs @@ -2,6 +2,7 @@ use { solana_account_decoder::parse_token::{ is_known_spl_token_id, token_amount_to_ui_amount, UiTokenAmount, }, + solana_accounts_db::account_overrides::AccountOverrides, solana_measure::measure::Measure, solana_metrics::datapoint_debug, solana_runtime::{bank::Bank, transaction_batch::TransactionBatch}, @@ -38,6 +39,7 @@ pub fn collect_token_balances( bank: &Bank, batch: &TransactionBatch, mint_decimals: &mut HashMap, + cached_accounts: Option<&AccountOverrides>, ) -> TransactionTokenBalances { let mut balances: TransactionTokenBalances = vec![]; let mut collect_time = Measure::start("collect_token_balances"); @@ -58,8 +60,12 @@ pub fn collect_token_balances( ui_token_amount, owner, program_id, - }) = collect_token_balance_from_account(bank, account_id, mint_decimals) - { + }) = collect_token_balance_from_account( + bank, + account_id, + mint_decimals, + cached_accounts, + ) { transaction_balances.push(TransactionTokenBalance { account_index: index as u8, mint, @@ -92,8 +98,17 @@ fn collect_token_balance_from_account( bank: &Bank, account_id: &Pubkey, mint_decimals: &mut HashMap, + account_overrides: Option<&AccountOverrides>, ) -> Option { - let account = bank.get_account(account_id)?; + let account = { + if let Some(account_override) = + account_overrides.and_then(|overrides| overrides.get(account_id)) + { + Some(account_override.clone()) + } else { + bank.get_account(account_id) + } + }?; if !is_known_spl_token_id(account.owner()) { return None; @@ -235,13 +250,13 @@ mod test { // Account is not owned by spl_token (nor does it have TokenAccount state) assert_eq!( - collect_token_balance_from_account(&bank, &account_pubkey, &mut mint_decimals), + 
collect_token_balance_from_account(&bank, &account_pubkey, &mut mint_decimals, None), None ); // Mint does not have TokenAccount state assert_eq!( - collect_token_balance_from_account(&bank, &mint_pubkey, &mut mint_decimals), + collect_token_balance_from_account(&bank, &mint_pubkey, &mut mint_decimals, None), None ); @@ -250,7 +265,8 @@ mod test { collect_token_balance_from_account( &bank, &spl_token_account_pubkey, - &mut mint_decimals + &mut mint_decimals, + None ), Some(TokenBalanceData { mint: mint_pubkey.to_string(), @@ -267,7 +283,12 @@ mod test { // TokenAccount is not owned by known spl-token program_id assert_eq!( - collect_token_balance_from_account(&bank, &other_account_pubkey, &mut mint_decimals), + collect_token_balance_from_account( + &bank, + &other_account_pubkey, + &mut mint_decimals, + None + ), None ); @@ -276,7 +297,8 @@ mod test { collect_token_balance_from_account( &bank, &other_mint_account_pubkey, - &mut mint_decimals + &mut mint_decimals, + None ), None ); @@ -429,13 +451,13 @@ mod test { // Account is not owned by spl_token (nor does it have TokenAccount state) assert_eq!( - collect_token_balance_from_account(&bank, &account_pubkey, &mut mint_decimals), + collect_token_balance_from_account(&bank, &account_pubkey, &mut mint_decimals, None), None ); // Mint does not have TokenAccount state assert_eq!( - collect_token_balance_from_account(&bank, &mint_pubkey, &mut mint_decimals), + collect_token_balance_from_account(&bank, &mint_pubkey, &mut mint_decimals, None), None ); @@ -444,7 +466,8 @@ mod test { collect_token_balance_from_account( &bank, &spl_token_account_pubkey, - &mut mint_decimals + &mut mint_decimals, + None ), Some(TokenBalanceData { mint: mint_pubkey.to_string(), @@ -461,7 +484,12 @@ mod test { // TokenAccount is not owned by known spl-token program_id assert_eq!( - collect_token_balance_from_account(&bank, &other_account_pubkey, &mut mint_decimals), + collect_token_balance_from_account( + &bank, + &other_account_pubkey, + &mut mint_decimals, + None + ), None ); @@ -470,7 +498,8 @@ mod test { collect_token_balance_from_account( &bank, &other_mint_account_pubkey, - &mut mint_decimals + &mut mint_decimals, + None ), None ); diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index d180a4abaf..71d3a9d00c 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -295,6 +295,7 @@ impl LocalCluster { DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, Arc::new(RwLock::new(None)), + None, ) .expect("assume successful validator start"); @@ -510,6 +511,7 @@ impl LocalCluster { DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, Arc::new(RwLock::new(None)), + None, ) .expect("assume successful validator start"); @@ -907,6 +909,7 @@ impl Cluster for LocalCluster { DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, Arc::new(RwLock::new(None)), + None, ) .expect("assume successful validator start"); cluster_validator_info.validator = Some(restarted_node); diff --git a/local-cluster/src/local_cluster_snapshot_utils.rs b/local-cluster/src/local_cluster_snapshot_utils.rs index 259b9e1559..2ebd90e86e 100644 --- a/local-cluster/src/local_cluster_snapshot_utils.rs +++ b/local-cluster/src/local_cluster_snapshot_utils.rs @@ -90,7 +90,10 @@ impl LocalCluster { let timer = Instant::now(); let next_snapshot = loop { if let Some(full_snapshot_archive_info) = - snapshot_utils::get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir) + 
snapshot_utils::get_highest_full_snapshot_archive_info( + &full_snapshot_archives_dir, + None, + ) { match next_snapshot_type { NextSnapshotType::FullSnapshot => { @@ -103,6 +106,7 @@ impl LocalCluster { snapshot_utils::get_highest_incremental_snapshot_archive_info( incremental_snapshot_archives_dir.as_ref().unwrap(), full_snapshot_archive_info.slot(), + None, ) { if incremental_snapshot_archive_info.slot() >= last_slot { diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 627c5797cd..93c7c9a4a7 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -69,6 +69,11 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { block_production_method: config.block_production_method.clone(), generator_config: config.generator_config.clone(), use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, + relayer_config: config.relayer_config.clone(), + block_engine_config: config.block_engine_config.clone(), + shred_receiver_address: config.shred_receiver_address.clone(), + tip_manager_config: config.tip_manager_config.clone(), + preallocated_bundle_cost: config.preallocated_bundle_cost, } } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index fc08389947..4b9c6c879b 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -870,6 +870,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st validator_snapshot_test_config .full_snapshot_archives_dir .path(), + None, ) .unwrap(); info!( @@ -909,6 +910,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st .incremental_snapshot_archives_dir .path(), full_snapshot_archive.slot(), + None, ) .unwrap(); info!( @@ -1051,6 +1053,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st validator_snapshot_test_config .full_snapshot_archives_dir .path(), + None, ) .unwrap(); @@ -1117,6 +1120,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st validator_snapshot_test_config .full_snapshot_archives_dir .path(), + None, ) .unwrap(); @@ -1145,6 +1149,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st validator_snapshot_test_config .full_snapshot_archives_dir .path(), + None, ) { if full_snapshot_slot >= validator_next_full_snapshot_slot { if let Some(incremental_snapshot_slot) = @@ -1153,6 +1158,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st .incremental_snapshot_archives_dir .path(), full_snapshot_slot, + None, ) { if incremental_snapshot_slot >= validator_next_incremental_snapshot_slot { @@ -1346,8 +1352,10 @@ fn test_snapshots_blockstore_floor() { trace!("Waiting for snapshot tar to be generated with slot",); let archive_info = loop { - let archive = - snapshot_utils::get_highest_full_snapshot_archive_info(full_snapshot_archives_dir); + let archive = snapshot_utils::get_highest_full_snapshot_archive_info( + full_snapshot_archives_dir, + None, + ); if archive.is_some() { trace!("snapshot exists"); break archive.unwrap(); @@ -4898,11 +4906,13 @@ fn test_boot_from_local_state() { let bank_snapshot = loop { if let Some(full_snapshot_slot) = snapshot_utils::get_highest_full_snapshot_archive_slot( &validator2_config.full_snapshot_archives_dir, + None, ) { if let Some(incremental_snapshot_slot) = snapshot_utils::get_highest_incremental_snapshot_archive_slot( 
&validator2_config.incremental_snapshot_archives_dir, full_snapshot_slot, + None, ) { if let Some(bank_snapshot) = snapshot_utils::get_highest_bank_snapshot_post( @@ -5002,12 +5012,14 @@ fn test_boot_from_local_state() { if let Some(other_full_snapshot_slot) = snapshot_utils::get_highest_full_snapshot_archive_slot( &other_validator_config.full_snapshot_archives_dir, + None, ) { let other_incremental_snapshot_slot = snapshot_utils::get_highest_incremental_snapshot_archive_slot( &other_validator_config.incremental_snapshot_archives_dir, other_full_snapshot_slot, + None, ); if other_full_snapshot_slot >= full_snapshot_archive.slot() && other_incremental_snapshot_slot >= Some(incremental_snapshot_archive.slot()) diff --git a/merkle-tree/src/merkle_tree.rs b/merkle-tree/src/merkle_tree.rs index 09285a41e7..57bceea1ec 100644 --- a/merkle-tree/src/merkle_tree.rs +++ b/merkle-tree/src/merkle_tree.rs @@ -18,7 +18,7 @@ macro_rules! hash_intermediate { } } -#[derive(Debug)] +#[derive(Default, Debug, Eq, Hash, PartialEq)] pub struct MerkleTree { leaf_count: usize, nodes: Vec, @@ -36,6 +36,14 @@ impl<'a> ProofEntry<'a> { assert!(left_sibling.is_none() ^ right_sibling.is_none()); Self(target, left_sibling, right_sibling) } + + pub fn get_left_sibling(&self) -> Option<&'a Hash> { + self.1 + } + + pub fn get_right_sibling(&self) -> Option<&'a Hash> { + self.2 + } } #[derive(Debug, Default, PartialEq, Eq)] @@ -60,6 +68,10 @@ impl<'a> Proof<'a> { }); result.is_some() } + + pub fn get_proof_entries(self) -> Vec> { + self.0 + } } impl MerkleTree { @@ -95,7 +107,7 @@ impl MerkleTree { } } - pub fn new>(items: &[T]) -> Self { + pub fn new>(items: &[T], sorted_hashes: bool) -> Self { let cap = MerkleTree::calculate_vec_capacity(items.len()); let mut mt = MerkleTree { leaf_count: items.len(), @@ -123,8 +135,20 @@ impl MerkleTree { &mt.nodes[prev_level_start + prev_level_idx] }; - let hash = hash_intermediate!(lsib, rsib); - mt.nodes.push(hash); + // tip-distribution verification uses sorted hashing + if sorted_hashes { + if lsib <= rsib { + let hash = hash_intermediate!(lsib, rsib); + mt.nodes.push(hash); + } else { + let hash = hash_intermediate!(rsib, lsib); + mt.nodes.push(hash); + } + } else { + // hashing for solana internals + let hash = hash_intermediate!(lsib, rsib); + mt.nodes.push(hash); + } } prev_level_start = level_start; prev_level_len = level_len; @@ -189,21 +213,21 @@ mod tests { #[test] fn test_tree_from_empty() { - let mt = MerkleTree::new::<[u8; 0]>(&[]); + let mt = MerkleTree::new::<[u8; 0]>(&[], false); assert_eq!(mt.get_root(), None); } #[test] fn test_tree_from_one() { let input = b"test"; - let mt = MerkleTree::new(&[input]); + let mt = MerkleTree::new(&[input], false); let expected = hash_leaf!(input); assert_eq!(mt.get_root(), Some(&expected)); } #[test] fn test_tree_from_many() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); // This golden hash will need to be updated whenever the contents of `TEST` change in any // way, including addition, removal and reordering or any of the tree calculation algo // changes @@ -215,7 +239,7 @@ mod tests { #[test] fn test_path_creation() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); for (i, _s) in TEST.iter().enumerate() { let _path = mt.find_path(i).unwrap(); } @@ -223,13 +247,13 @@ mod tests { #[test] fn test_path_creation_bad_index() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); assert_eq!(mt.find_path(TEST.len()), None); } #[test] fn test_path_verify_good() { 
- let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); for (i, s) in TEST.iter().enumerate() { let hash = hash_leaf!(s); let path = mt.find_path(i).unwrap(); @@ -239,7 +263,7 @@ mod tests { #[test] fn test_path_verify_bad() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); for (i, s) in BAD.iter().enumerate() { let hash = hash_leaf!(s); let path = mt.find_path(i).unwrap(); diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index f69c05d1ed..983b5eb6be 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -103,9 +103,39 @@ while [[ -n $1 ]]; do elif [[ $1 == --skip-require-tower ]]; then maybeRequireTower=false shift + elif [[ $1 == --relayer-url ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --block-engine-url ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --tip-payment-program-pubkey ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --tip-distribution-program-pubkey ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --commission-bps ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --shred-receiver-address ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 = --log-messages-bytes-limit ]]; then args+=("$1" "$2") shift 2 + elif [[ $1 == --geyser-plugin-config ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --trust-relayer-packets ]]; then + args+=("$1") + shift + elif [[ $1 == --rpc-threads ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --trust-block-engine-packets ]]; then + args+=("$1") + shift else echo "Unknown argument: $1" $program --help @@ -141,6 +171,7 @@ args+=( --no-incremental-snapshots --identity "$identity" --vote-account "$vote_account" + --merkle-root-upload-authority "$identity" --rpc-faucet-address 127.0.0.1:9900 --no-poh-speed-test --no-os-network-limits-test @@ -150,6 +181,9 @@ args+=( ) default_arg --gossip-port 8001 default_arg --log - +default_arg --tip-payment-program-pubkey "DThZmRNNXh7kvTQW9hXeGoWGPKktK8pgVAyoTLjH7UrT" +default_arg --tip-distribution-program-pubkey "FjrdANjvo76aCYQ4kf9FM1R8aESUcEE6F8V7qyoVUQcM" +default_arg --commission-bps 0 pid= diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 9090055b90..721008a661 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -85,6 +85,24 @@ while [[ -n $1 ]]; do vote_account=$2 args+=("$1" "$2") shift 2 + elif [[ $1 == --block-engine-url ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --relayer-url ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --merkle-root-upload-authority ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --tip-payment-program-pubkey ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --tip-distribution-program-pubkey ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --commission-bps ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 = --init-complete-file ]]; then args+=("$1" "$2") shift 2 @@ -182,6 +200,24 @@ while [[ -n $1 ]]; do elif [[ $1 == --skip-require-tower ]]; then maybeRequireTower=false shift + elif [[ $1 == --rpc-pubsub-enable-block-subscription ]]; then + args+=("$1") + shift + elif [[ $1 == --geyser-plugin-config ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --trust-relayer-packets ]]; then + args+=("$1") + shift + elif [[ $1 == --rpc-threads ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --shred-receiver-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --trust-block-engine-packets ]]; then + args+=("$1") 
+ shift elif [[ $1 = -h ]]; then usage "$@" else @@ -256,6 +292,10 @@ fi default_arg --identity "$identity" default_arg --vote-account "$vote_account" +default_arg --merkle-root-upload-authority "$identity" +default_arg --tip-payment-program-pubkey "DThZmRNNXh7kvTQW9hXeGoWGPKktK8pgVAyoTLjH7UrT" +default_arg --tip-distribution-program-pubkey "FjrdANjvo76aCYQ4kf9FM1R8aESUcEE6F8V7qyoVUQcM" +default_arg --commission-bps 0 default_arg --ledger "$ledger_dir" default_arg --log - default_arg --full-rpc-api diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 6078961d42..cbad41c510 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -110,7 +110,7 @@ pub fn init() { /// Returns true if the signature on the packet verifies. /// Caller must do packet.set_discard(true) if this returns false. #[must_use] -fn verify_packet(packet: &mut Packet, reject_non_vote: bool) -> bool { +pub fn verify_packet(packet: &mut Packet, reject_non_vote: bool) -> bool { // If this packet was already marked as discard, drop it if packet.meta().discard() { return false; diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 8fb10807af..f3a2c4e5ce 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -60,9 +60,14 @@ pub enum PohRecorderError { SendError(#[from] SendError), } -type Result = std::result::Result; +pub type Result = std::result::Result; -pub type WorkingBankEntry = (Arc, (Entry, u64)); +#[derive(Clone, Debug)] +pub struct WorkingBankEntry { + pub bank: Arc, + // normal entries have len == 1, bundles have len > 1 + pub entries_ticks: Vec<(Entry, u64)>, +} #[derive(Debug, Clone)] pub struct BankStart { @@ -92,21 +97,19 @@ impl BankStart { type RecordResultSender = Sender>>; pub struct Record { - pub mixin: Hash, - pub transactions: Vec, + // non-bundles shall have mixins_txs.len() == 1, bundles shall have mixins_txs.len() > 1 + pub mixins_txs: Vec<(Hash, Vec)>, pub slot: Slot, pub sender: RecordResultSender, } impl Record { pub fn new( - mixin: Hash, - transactions: Vec, + mixins_txs: Vec<(Hash, Vec)>, slot: Slot, sender: RecordResultSender, ) -> Self { Self { - mixin, - transactions, + mixins_txs, slot, sender, } } @@ -160,16 +163,21 @@ impl TransactionRecorder { pub fn record_transactions( &self, bank_slot: Slot, - transactions: Vec, + batches: Vec>, ) -> RecordTransactionsSummary { let mut record_transactions_timings = RecordTransactionsTimings::default(); let mut starting_transaction_index = None; - if !transactions.is_empty() { - let (hash, hash_us) = measure_us!(hash_transactions(&transactions)); + if !batches.is_empty() && !batches.iter().any(|b| b.is_empty()) { + let (hashes, hash_us) = measure_us!(batches + .iter() + .map(|b| hash_transactions(b)) + .collect::>()); record_transactions_timings.hash_us = hash_us; - let (res, poh_record_us) = measure_us!(self.record(bank_slot, hash, transactions)); + let hashes_transactions: Vec<_> = hashes.into_iter().zip(batches).collect(); + + let (res, poh_record_us) = measure_us!(self.record(bank_slot, hashes_transactions)); record_transactions_timings.poh_record_us = poh_record_us; match res { @@ -205,14 +213,13 @@ impl TransactionRecorder { pub fn record( &self, bank_slot: Slot, - mixin: Hash, - transactions: Vec, + mixins_txs: Vec<(Hash, Vec)>, ) -> Result> { // create a new channel so that there is only 1 sender and when it goes out of scope, the receiver fails let (result_sender, result_receiver) = unbounded(); - let res = - self.record_sender - .send(Record::new(mixin, transactions, bank_slot, result_sender)); +
let res = self + .record_sender + .send(Record::new(mixins_txs, bank_slot, result_sender)); if res.is_err() { // If the channel is dropped, then the validator is shutting down so return that we are hitting // the max tick height to stop transaction processing and flush any transactions in the pipeline. @@ -678,7 +685,10 @@ impl PohRecorder { for tick in &self.tick_cache[..entry_count] { working_bank.bank.register_tick(&tick.0.hash); - send_result = self.sender.send((working_bank.bank.clone(), tick.clone())); + send_result = self.sender.send(WorkingBankEntry { + bank: working_bank.bank.clone(), + entries_ticks: vec![tick.clone()], + }); if send_result.is_err() { break; } @@ -858,16 +868,23 @@ impl PohRecorder { pub fn record( &mut self, bank_slot: Slot, - mixin: Hash, - transactions: Vec, + mixins_txs: &[(Hash, Vec)], ) -> Result> { // Entries without transactions are used to track real-time passing in the ledger and // cannot be generated by `record()` - assert!(!transactions.is_empty(), "No transactions provided"); + assert!(!mixins_txs.is_empty(), "No transactions provided"); + assert!( + !mixins_txs.iter().any(|(_, txs)| txs.is_empty()), + "One of mixins is missing txs" + ); let ((), report_metrics_time) = measure!(self.report_metrics(bank_slot), "report_metrics"); self.report_metrics_us += report_metrics_time.as_us(); + let mixins: Vec = mixins_txs.iter().map(|(m, _)| *m).collect(); + let transactions: Vec> = + mixins_txs.iter().map(|(_, tx)| tx.clone()).collect(); + loop { let (flush_cache_res, flush_cache_time) = measure!(self.flush_cache(false), "flush_cache"); @@ -885,23 +902,36 @@ impl PohRecorder { let (mut poh_lock, poh_lock_time) = measure!(self.poh.lock().unwrap(), "poh_lock"); self.record_lock_contention_us += poh_lock_time.as_us(); - let (record_mixin_res, record_mixin_time) = - measure!(poh_lock.record(mixin), "record_mixin"); + let (maybe_entries, record_mixin_time) = + measure!(poh_lock.record_bundle(&mixins), "record_mixin"); self.record_us += record_mixin_time.as_us(); drop(poh_lock); - if let Some(poh_entry) = record_mixin_res { - let num_transactions = transactions.len(); + if let Some(entries) = maybe_entries { + assert_eq!(entries.len(), transactions.len()); + let num_transactions = transactions.iter().map(|txs| txs.len()).sum(); let (send_entry_res, send_entry_time) = measure!( { - let entry = Entry { - num_hashes: poh_entry.num_hashes, - hash: poh_entry.hash, - transactions, - }; + let entries_tick_heights: Vec<(Entry, u64)> = entries + .into_iter() + .zip(transactions.into_iter()) + .map(|(poh_entry, transactions)| { + ( + Entry { + num_hashes: poh_entry.num_hashes, + hash: poh_entry.hash, + transactions, + }, + self.tick_height, + ) + }) + .collect(); let bank_clone = working_bank.bank.clone(); - self.sender.send((bank_clone, (entry, self.tick_height))) + self.sender.send(WorkingBankEntry { + bank: bank_clone, + entries_ticks: entries_tick_heights, + }) }, "send_poh_entry", ); @@ -1279,13 +1309,17 @@ mod tests { assert_eq!(poh_recorder.tick_height, tick_height_before + 1); assert_eq!(poh_recorder.tick_cache.len(), 0); let mut num_entries = 0; - while let Ok((wbank, (_entry, _tick_height))) = entry_receiver.try_recv() { + while let Ok(WorkingBankEntry { + bank: wbank, + entries_ticks, + }) = entry_receiver.try_recv() + { assert_eq!(wbank.slot(), bank1.slot()); - num_entries += 1; + num_entries += entries_ticks.len(); } // All the cached ticks, plus the new tick above should have been flushed - assert_eq!(num_entries, num_new_ticks + 1); + assert_eq!(num_entries 
as u64, num_new_ticks + 1); } Blockstore::destroy(&ledger_path).unwrap(); } @@ -1374,7 +1408,7 @@ mod tests { // We haven't yet reached the minimum tick height for the working bank, // so record should fail assert_matches!( - poh_recorder.record(bank1.slot(), h1, vec![tx.into()]), + poh_recorder.record(bank1.slot(), &[(h1, vec![tx.into()])]), Err(PohRecorderError::MinHeightNotReached) ); assert!(entry_receiver.try_recv().is_err()); @@ -1417,7 +1451,7 @@ mod tests { // However we hand over a bad slot so record fails let bad_slot = bank.slot() + 1; assert_matches!( - poh_recorder.record(bad_slot, h1, vec![tx.into()]), + poh_recorder.record(bad_slot, &[(h1, vec![tx.into()])]), Err(PohRecorderError::MaxHeightReached) ); } @@ -1464,17 +1498,27 @@ mod tests { let tx = test_tx(); let h1 = hash(b"hello world!"); assert!(poh_recorder - .record(bank1.slot(), h1, vec![tx.into()]) + .record(bank1.slot(), &[(h1, vec![tx.into()])]) .is_ok()); assert_eq!(poh_recorder.tick_cache.len(), 0); //tick in the cache + entry for _ in 0..min_tick_height { - let (_bank, (e, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let e = entries_ticks.get(0).unwrap().0.clone(); assert!(e.is_tick()); } - let (_bank, (e, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let e = entries_ticks.get(0).unwrap().0.clone(); assert!(!e.is_tick()); } Blockstore::destroy(&ledger_path).unwrap(); @@ -1510,10 +1554,16 @@ mod tests { let tx = test_tx(); let h1 = hash(b"hello world!"); assert!(poh_recorder - .record(bank.slot(), h1, vec![tx.into()]) + .record(bank.slot(), &[(h1, vec![tx.into()])]) .is_err()); + for _ in 0..num_ticks_to_max { - let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let entry = entries_ticks.get(0).unwrap().0.clone(); assert!(entry.is_tick()); } } @@ -1558,7 +1608,7 @@ mod tests { let tx1 = test_tx(); let h1 = hash(b"hello world!"); let record_result = poh_recorder - .record(bank.slot(), h1, vec![tx0.into(), tx1.into()]) + .record(bank.slot(), &[(h1, vec![tx0.into(), tx1.into()])]) .unwrap() .unwrap(); assert_eq!(record_result, 0); @@ -1575,7 +1625,7 @@ mod tests { let tx = test_tx(); let h2 = hash(b"foobar"); let record_result = poh_recorder - .record(bank.slot(), h2, vec![tx.into()]) + .record(bank.slot(), &[(h2, vec![tx.into()])]) .unwrap() .unwrap(); assert_eq!(record_result, 2); @@ -1842,7 +1892,7 @@ mod tests { let tx = test_tx(); let h1 = hash(b"hello world!"); assert!(poh_recorder - .record(bank.slot(), h1, vec![tx.into()]) + .record(bank.slot(), &[(h1, vec![tx.into()])]) .is_err()); assert!(poh_recorder.working_bank.is_none()); diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index caa2c2a7c8..b4ce97e129 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -192,11 +192,12 @@ impl PohService { if let Ok(record) = record { if record .sender - .send(poh_recorder.write().unwrap().record( - record.slot, - record.mixin, - record.transactions, - )) + .send( + poh_recorder + .write() + .unwrap() + .record(record.slot, &record.mixins_txs), + ) .is_err() { panic!("Error returning mixin hash"); }
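// A small, self-contained sketch (editorial; types simplified, not the repo's real
// signatures) of the record-path change running through these hunks: callers now pass
// a slice of (mixin, transactions) pairs, so a plain batch is the one-element case and
// a bundle is several pairs recorded in one call.

// Stand-ins: Hash for solana_sdk::hash::Hash, u64 for VersionedTransaction.
type Hash = [u8; 32];
type Tx = u64;

// Shape of PohRecorder::record after this diff (simplified): every pair must carry at
// least one transaction, matching the asserts in the hunks above.
fn record(mixins_txs: &[(Hash, Vec<Tx>)]) -> usize {
    assert!(!mixins_txs.is_empty(), "No transactions provided");
    assert!(
        !mixins_txs.iter().any(|(_, txs)| txs.is_empty()),
        "One of mixins is missing txs"
    );
    mixins_txs.iter().map(|(_, txs)| txs.len()).sum()
}

fn main() {
    let mixin = [0u8; 32];
    // Normal batch: one (mixin, txs) pair.
    assert_eq!(record(&[(mixin, vec![1, 2])]), 2);
    // Bundle: several pairs recorded atomically in a single call.
    assert_eq!(record(&[(mixin, vec![1]), (mixin, vec![2, 3])]), 3);
}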
mut record_time = Measure::start("record"); loop { - let res = poh_recorder_l.record( - record.slot, - record.mixin, - std::mem::take(&mut record.transactions), - ); + let res = poh_recorder_l.record(record.slot, &record.mixins_txs); // what do we do on failure here? Ignore for now. let (_send_res, send_record_result_time) = measure!(record.sender.send(res), "send_record_result"); @@ -381,6 +378,7 @@ impl PohService { mod tests { use { super::*, + crate::poh_recorder::WorkingBankEntry, rand::{thread_rng, Rng}, solana_ledger::{ blockstore::Blockstore, @@ -460,11 +458,10 @@ mod tests { loop { // send some data let mut time = Measure::start("record"); - let _ = poh_recorder.write().unwrap().record( - bank_slot, - h1, - vec![tx.clone()], - ); + let _ = poh_recorder + .write() + .unwrap() + .record(bank_slot, &[(h1, vec![tx.clone()])]); time.stop(); total_us += time.as_us(); total_times += 1; @@ -509,7 +506,12 @@ mod tests { let time = Instant::now(); while run_time != 0 || need_tick || need_entry || need_partial { - let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let entry = entries_ticks.get(0).unwrap().0.clone(); if entry.is_tick() { num_ticks += 1; diff --git a/program-runtime/src/timings.rs index 8eeb9c5a00..74b7eb732f 100644 --- a/program-runtime/src/timings.rs +++ b/program-runtime/src/timings.rs @@ -8,7 +8,7 @@ use { }, }; -#[derive(Default, Debug, PartialEq, Eq)] +#[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct ProgramTiming { pub accumulated_us: u64, pub accumulated_units: u64, @@ -53,6 +53,7 @@ pub enum ExecuteTimingType { UpdateTransactionStatuses, } +#[derive(Clone)] pub struct Metrics([u64; ExecuteTimingType::CARDINALITY]); impl Index<ExecuteTimingType> for Metrics { @@ -309,7 +310,7 @@ impl ThreadExecuteTimings { } } -#[derive(Debug, Default)] +#[derive(Clone, Debug, Default)] pub struct ExecuteTimings { pub metrics: Metrics, pub details: ExecuteDetailsTimings, @@ -333,9 +334,21 @@ impl ExecuteTimings { None => debug_assert!(idx < ExecuteTimingType::CARDINALITY, "Index out of bounds"), } } + + pub fn accumulate_execute_units_and_time(&self) -> (u64, u64) { + self.details + .per_program_timings + .values() + .fold((0, 0), |(units, times), program_timings| { + ( + units.saturating_add(program_timings.accumulated_units), + times.saturating_add(program_timings.accumulated_us), + ) + }) + } } -#[derive(Default, Debug)] +#[derive(Clone, Default, Debug)] pub struct ExecuteProcessInstructionTimings { pub total_us: u64, pub verify_caller_us: u64, @@ -355,7 +368,7 @@ impl ExecuteProcessInstructionTimings { } } -#[derive(Default, Debug)] +#[derive(Clone, Default, Debug)] pub struct ExecuteAccessoryTimings { pub feature_set_clone_us: u64, pub compute_budget_process_transaction_us: u64, @@ -380,7 +393,7 @@ impl ExecuteAccessoryTimings { } } -#[derive(Default, Debug, PartialEq, Eq)] +#[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct ExecuteDetailsTimings { pub serialize_us: u64, pub create_vm_us: u64,
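For context on the `accumulate_execute_units_and_time` helper added above: it folds every program's accumulated compute units and execution time into one pair, saturating instead of overflowing. A minimal self-contained sketch of the same fold (the stand-in struct, map, and numbers are hypothetical, not the crate's types):

```rust
use std::collections::HashMap;

// Local stand-in mirroring the two ProgramTiming fields the fold reads.
struct ProgramTiming {
    accumulated_us: u64,
    accumulated_units: u64,
}

// Same shape as ExecuteTimings::accumulate_execute_units_and_time above:
// returns (total_units, total_us), saturating on overflow.
fn accumulate(per_program: &HashMap<String, ProgramTiming>) -> (u64, u64) {
    per_program.values().fold((0, 0), |(units, us), t| {
        (
            units.saturating_add(t.accumulated_units),
            us.saturating_add(t.accumulated_us),
        )
    })
}

fn main() {
    let mut timings = HashMap::new();
    timings.insert("program_a".to_string(), ProgramTiming { accumulated_us: 25, accumulated_units: 1_000 });
    timings.insert("program_b".to_string(), ProgramTiming { accumulated_us: 5, accumulated_units: 200 });
    assert_eq!(accumulate(&timings), (1_200, 30));
}
```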
diff --git a/program-test/src/programs.rs index 8d9a42790f..0cbff58997 100644 --- a/program-test/src/programs.rs +++ b/program-test/src/programs.rs @@ -21,6 +21,13 @@ mod spl_associated_token_account { solana_sdk::declare_id!("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL"); } +mod jito_tip_payment { + solana_sdk::declare_id!("T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt"); +} +mod jito_tip_distribution { + solana_sdk::declare_id!("4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7"); +} + static SPL_PROGRAMS: &[(Pubkey, Pubkey, &[u8])] = &[ ( spl_token::ID, @@ -47,6 +54,16 @@ static SPL_PROGRAMS: &[(Pubkey, Pubkey, &[u8])] = &[ solana_sdk::bpf_loader::ID, include_bytes!("programs/spl_associated_token_account-1.1.1.so"), ), + ( + jito_tip_distribution::ID, + solana_sdk::bpf_loader::ID, + include_bytes!("programs/jito_tip_distribution-0.1.4.so"), + ), + ( + jito_tip_payment::ID, + solana_sdk::bpf_loader::ID, + include_bytes!("programs/jito_tip_payment-0.1.4.so"), + ), ]; pub fn spl_programs(rent: &Rent) -> Vec<(Pubkey, AccountSharedData)> { diff --git a/program-test/src/programs/jito_tip_distribution-0.1.4.so b/program-test/src/programs/jito_tip_distribution-0.1.4.so new file mode 100644 index 0000000000..29fc1a0eb5 Binary files /dev/null and b/program-test/src/programs/jito_tip_distribution-0.1.4.so differ diff --git a/program-test/src/programs/jito_tip_payment-0.1.4.so b/program-test/src/programs/jito_tip_payment-0.1.4.so new file mode 100644 index 0000000000..49e574ea8c Binary files /dev/null and b/program-test/src/programs/jito_tip_payment-0.1.4.so differ diff --git a/programs/sbf/Cargo.lock index 7748b95b7a..6c59ee9f4f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -126,6 +126,145 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "anchor-attribute-access-control" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.67", + "quote 1.0.33", + "regex", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-account" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "bs58 0.4.0", + "proc-macro2 1.0.67", + "quote 1.0.33", + "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-constant" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "proc-macro2 1.0.67", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-error" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "proc-macro2 1.0.67", + "quote 1.0.33", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-event" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.67", + "quote 1.0.33", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-interface" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "heck 0.3.3", + "proc-macro2 1.0.67", + "quote 1.0.33", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-program" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.67", + "quote 1.0.33", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-state" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.67", + "quote 1.0.33", + "syn 1.0.109", +] + +[[package]] +name = "anchor-derive-accounts" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.67", + "quote 1.0.33", + "syn 1.0.109", +] + +[[package]] +name = "anchor-lang" +version = "0.24.2" +dependencies = [ + "anchor-attribute-access-control", + "anchor-attribute-account", + "anchor-attribute-constant", + "anchor-attribute-error", + "anchor-attribute-event", + "anchor-attribute-interface", + "anchor-attribute-program", + "anchor-attribute-state", + "anchor-derive-accounts", + "arrayref", + "base64 0.13.1", + "bincode", + "borsh 0.10.3", + "bytemuck", + "solana-program", + "thiserror", +] + +[[package]] +name = "anchor-syn" +version = "0.24.2"
+dependencies = [ + "anyhow", + "bs58 0.3.1", + "heck 0.3.3", + "proc-macro2 1.0.67", + "proc-macro2-diagnostics", + "quote 1.0.33", + "serde", + "serde_json", + "sha2 0.9.9", + "syn 1.0.109", + "thiserror", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -216,7 +355,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote", + "quote 1.0.33", "syn 1.0.109", ] @@ -228,8 +367,8 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint 0.4.4", "num-traits", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -264,8 +403,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -325,8 +464,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", "synstructure", ] @@ -337,8 +476,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -398,8 +537,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -409,8 +548,8 @@ version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -560,8 +699,8 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease 0.2.4", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "regex", "rustc-hash", "shlex", @@ -681,7 +820,7 @@ dependencies = [ "borsh-derive-internal 0.9.3", "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", - "proc-macro2", + "proc-macro2 1.0.67", "syn 1.0.109", ] @@ -694,7 +833,7 @@ dependencies = [ "borsh-derive-internal 0.10.3", "borsh-schema-derive-internal 0.10.3", "proc-macro-crate 0.1.5", - "proc-macro2", + "proc-macro2 1.0.67", "syn 1.0.109", ] @@ -704,8 +843,8 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -715,8 +854,8 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -726,8 +865,8 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ - "proc-macro2", - 
"quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -737,8 +876,8 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -763,6 +902,12 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "bs58" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" + [[package]] name = "bs58" version = "0.4.0" @@ -815,8 +960,8 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aca418a974d83d40a0c1f0c5cba6ff4bc28d8df099109ca459a2118d40b6322" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -1204,8 +1349,8 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "strsim 0.10.0", "syn 2.0.37", ] @@ -1217,7 +1362,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", - "quote", + "quote 1.0.33", "syn 2.0.37", ] @@ -1238,6 +1383,17 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +[[package]] +name = "default-env" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f753eb82d29277e79efc625e84aecacfd4851ee50e05a8573a4740239a77bfd3" +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "syn 0.15.44", +] + [[package]] name = "der" version = "0.5.1" @@ -1273,8 +1429,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -1285,8 +1441,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "rustc_version", "syn 1.0.109", ] @@ -1368,8 +1524,8 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -1391,8 +1547,8 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -1444,8 +1600,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86b50932a01e7ec5c06160492ab660fb19b6bb2a7878030dd6cd68d21df9d4d" dependencies = [ "enum-ordinalize", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -1494,8 +1650,8 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -1507,8 +1663,8 @@ checksum = "0b166c9e378360dd5a6666a9604bb4f54ae0cac39023ffbac425e917a2a04fef" dependencies = [ "num-bigint 0.4.4", "num-traits", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -1743,8 +1899,8 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -1975,6 +2131,15 @@ dependencies = [ "http", ] +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "heck" version = "0.4.0" @@ -2285,6 +2450,49 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jito-programs-vote-state" +version = "0.1.5" +dependencies = [ + "anchor-lang", + "bincode", + "serde", + "serde_derive", + "solana-program", +] + +[[package]] +name = "jito-protos" +version = "1.17.23" +dependencies = [ + "bytes", + "prost", + "prost-types", + "protobuf-src", + "tonic", + "tonic-build", +] + +[[package]] +name = "jito-tip-distribution" +version = "0.1.5" +dependencies = [ + "anchor-lang", + "default-env", + "jito-programs-vote-state", + "solana-program", + "solana-security-txt", +] + +[[package]] +name = "jito-tip-payment" +version = "0.1.5" +dependencies = [ + "anchor-lang", + "default-env", + "solana-security-txt", +] + [[package]] name = "jobserver" version = "0.1.21" @@ -2365,8 +2573,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -2800,8 +3008,8 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -2915,8 +3123,8 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c8b15b261814f992e33760b1fca9fe8b693d8a65299f20c9901688636cfb746" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -2926,8 +3134,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -3008,8 +3216,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.1.3", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -3020,8 +3228,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" 
dependencies = [ "proc-macro-crate 1.1.3", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -3097,8 +3305,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -3176,8 +3384,8 @@ checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" dependencies = [ "Inflector", "proc-macro-error", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -3330,8 +3538,8 @@ checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -3371,8 +3579,8 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -3447,7 +3655,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b83ec2d0af5c5c556257ff52c9f98934e243b9fd39604bfb2a9b75ec2e97f18" dependencies = [ - "proc-macro2", + "proc-macro2 1.0.67", "syn 1.0.109", ] @@ -3457,7 +3665,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ - "proc-macro2", + "proc-macro2 1.0.67", "syn 2.0.37", ] @@ -3487,8 +3695,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", "version_check", ] @@ -3499,11 +3707,20 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "version_check", ] +[[package]] +name = "proc-macro2" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +dependencies = [ + "unicode-xid 0.1.0", +] + [[package]] name = "proc-macro2" version = "1.0.67" @@ -3513,6 +3730,19 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" +dependencies = [ + "proc-macro2 1.0.67", + "quote 1.0.33", + "syn 1.0.109", + "version_check", + "yansi", +] + [[package]] name = "prost" version = "0.11.9" @@ -3530,7 +3760,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", - "heck", + "heck 0.4.0", "itertools", "lazy_static", "log", @@ -3553,8 +3783,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -3591,8 +3821,8 @@ version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -3644,13 +3874,22 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "quote" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +dependencies = [ + "proc-macro2 0.4.30", +] + [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ - "proc-macro2", + "proc-macro2 1.0.67", ] [[package]] @@ -3995,7 +4234,7 @@ checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.4", "sct", ] @@ -4029,6 +4268,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "rustls-webpki" +version = "0.100.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.101.4" @@ -4091,8 +4340,8 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -4159,8 +4408,8 @@ version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -4204,8 +4453,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ "darling", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -4453,7 +4702,7 @@ dependencies = [ "Inflector", "base64 0.21.4", "bincode", - "bs58", + "bs58 0.4.0", "bv", "lazy_static", "serde", @@ -4579,6 +4828,7 @@ dependencies = [ "solana-accounts-db", "solana-banks-interface", "solana-client", + "solana-gossip", "solana-runtime", "solana-sdk", "solana-send-transaction-service", @@ -4647,6 +4897,26 @@ dependencies = [ "tempfile", ] +[[package]] +name = "solana-bundle" +version = "1.17.23" +dependencies = [ + "anchor-lang", + "itertools", + "log", + "serde", + "solana-accounts-db", + "solana-ledger", + "solana-logger", + "solana-measure", + "solana-poh", + "solana-program-runtime", + "solana-runtime", + "solana-sdk", + "solana-transaction-status", + "thiserror", +] + [[package]] name = "solana-clap-utils" version = "1.17.22" @@ -4776,9 +5046,10 @@ dependencies = [ name = "solana-core" version = "1.17.22" dependencies = [ + "anchor-lang", "base64 0.21.4", "bincode", - "bs58", + "bs58 0.4.0", "bytes", "chrono", "crossbeam-channel", @@ -4788,11 +5059,16 @@ dependencies = [ "futures 0.3.28", "histogram", "itertools", + "jito-protos", + "jito-tip-distribution", + "jito-tip-payment", "lazy_static", "log", "lru", "min-max-heap", "num_enum 0.6.1", + "prost", + "prost-types", "quinn", "rand 0.8.5", "rand_chacha 0.3.1", @@ -4806,6 +5082,7 @@ dependencies = [ "serde_derive", 
"solana-accounts-db", "solana-bloom", + "solana-bundle", "solana-client", "solana-cost-model", "solana-entry", @@ -4825,6 +5102,7 @@ dependencies = [ "solana-rpc", "solana-rpc-client-api", "solana-runtime", + "solana-runtime-plugin", "solana-sdk", "solana-send-transaction-service", "solana-streamer", @@ -4841,6 +5119,8 @@ dependencies = [ "tempfile", "thiserror", "tokio", + "tonic", + "tonic-build", "trees", ] @@ -4927,7 +5207,7 @@ dependencies = [ "ahash 0.8.5", "blake3", "block-buffer 0.10.4", - "bs58", + "bs58 0.4.0", "bv", "byteorder 1.4.3", "cc", @@ -4952,8 +5232,8 @@ dependencies = [ name = "solana-frozen-abi-macro" version = "1.17.22" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "rustc_version", "syn 2.0.37", ] @@ -4983,7 +5263,7 @@ dependencies = [ name = "solana-geyser-plugin-manager" version = "1.17.22" dependencies = [ - "bs58", + "bs58 0.4.0", "crossbeam-channel", "json5", "jsonrpc-core", @@ -5242,7 +5522,7 @@ dependencies = [ "blake3", "borsh 0.10.3", "borsh 0.9.3", - "bs58", + "bs58 0.4.0", "bv", "bytemuck", "cc", @@ -5413,7 +5693,7 @@ version = "1.17.22" dependencies = [ "base64 0.21.4", "bincode", - "bs58", + "bs58 0.4.0", "crossbeam-channel", "dashmap", "itertools", @@ -5432,6 +5712,7 @@ dependencies = [ "soketto", "solana-account-decoder", "solana-accounts-db", + "solana-bundle", "solana-client", "solana-entry", "solana-faucet", @@ -5441,6 +5722,7 @@ dependencies = [ "solana-metrics", "solana-perf", "solana-poh", + "solana-program-runtime", "solana-rayon-threadlimit", "solana-rpc-client-api", "solana-runtime", @@ -5469,7 +5751,7 @@ dependencies = [ "async-trait", "base64 0.21.4", "bincode", - "bs58", + "bs58 0.4.0", "indicatif", "log", "reqwest", @@ -5491,7 +5773,7 @@ name = "solana-rpc-client-api" version = "1.17.22" dependencies = [ "base64 0.21.4", - "bs58", + "bs58 0.4.0", "jsonrpc-core", "reqwest", "semver", @@ -5499,6 +5781,8 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", + "solana-accounts-db", + "solana-bundle", "solana-sdk", "solana-transaction-status", "solana-version", @@ -5592,6 +5876,24 @@ dependencies = [ "zstd", ] +[[package]] +name = "solana-runtime-plugin" +version = "1.17.23" +dependencies = [ + "crossbeam-channel", + "json5", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-ipc-server", + "jsonrpc-server-utils", + "libloading", + "log", + "solana-runtime", + "solana-sdk", + "thiserror", +] + [[package]] name = "solana-sbf-programs" version = "1.17.22" @@ -6018,12 +6320,13 @@ dependencies = [ name = "solana-sdk" version = "1.17.22" dependencies = [ + "anchor-lang", "assert_matches", "base64 0.21.4", "bincode", "bitflags 2.3.3", "borsh 0.10.3", - "bs58", + "bs58 0.4.0", "bytemuck", "byteorder 1.4.3", "chrono", @@ -6070,9 +6373,9 @@ dependencies = [ name = "solana-sdk-macro" version = "1.17.22" dependencies = [ - "bs58", - "proc-macro2", - "quote", + "bs58 0.4.0", + "proc-macro2 1.0.67", + "quote 1.0.33", "rustversion", "syn 2.0.37", ] @@ -6090,6 +6393,7 @@ dependencies = [ "crossbeam-channel", "log", "solana-client", + "solana-gossip", "solana-measure", "solana-metrics", "solana-runtime", @@ -6147,7 +6451,7 @@ name = "solana-storage-proto" version = "1.17.22" dependencies = [ "bincode", - "bs58", + "bs58 0.4.0", "prost", "protobuf-src", "serde", @@ -6272,7 +6576,7 @@ dependencies = [ "base64 0.21.4", "bincode", "borsh 0.10.3", - "bs58", + "bs58 0.4.0", "lazy_static", "log", "serde", @@ -6384,6 +6688,7 @@ dependencies = [ "solana-rpc-client", 
"solana-rpc-client-api", "solana-runtime", + "solana-runtime-plugin", "solana-sdk", "solana-send-transaction-service", "solana-storage-bigtable", @@ -6395,6 +6700,7 @@ dependencies = [ "symlink", "thiserror", "tikv-jemallocator", + "tonic", ] [[package]] @@ -6561,7 +6867,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ - "quote", + "quote 1.0.33", "spl-discriminator-syn", "syn 2.0.37", ] @@ -6572,8 +6878,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e5f2044ca42c8938d54d1255ce599c79a1ffd86b677dfab695caa20f9ffc3f2" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "sha2 0.10.7", "syn 2.0.37", "thiserror", @@ -6620,8 +6926,8 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5269c8e868da17b6552ef35a51355a017bd8e0eae269c201fef830d35fa52c" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "sha2 0.10.7", "syn 2.0.37", ] @@ -6779,9 +7085,9 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", - "proc-macro2", - "quote", + "heck 0.4.0", + "proc-macro2 1.0.67", + "quote 1.0.33", "rustversion", "syn 1.0.109", ] @@ -6798,14 +7104,25 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7973cce6668464ea31f176d85b13c7ab3bba2cb3b77a2ed26abd7801688010a" +[[package]] +name = "syn" +version = "0.15.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "unicode-xid 0.1.0", +] + [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "unicode-ident", ] @@ -6815,8 +7132,8 @@ version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "unicode-ident", ] @@ -6832,10 +7149,10 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", - "unicode-xid", + "unicode-xid 0.2.3", ] [[package]] @@ -6902,8 +7219,8 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 1.0.109", ] @@ -6946,8 +7263,8 @@ checksum = "54c25e2cb8f5fcd7318157634e8838aa6f7e4715c96637f969fabaccd1ef5462" dependencies = [ "cfg-if 1.0.0", "proc-macro-error", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -6958,8 +7275,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37cfd7bbc88a0104e304229fba519bdc45501a30b760fb72240342f1289ad257" 
dependencies = [ "proc-macro-error", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", "test-case-core", ] @@ -6994,8 +7311,8 @@ version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -7117,8 +7434,8 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -7244,6 +7561,7 @@ dependencies = [ "percent-encoding 2.3.0", "pin-project", "prost", + "rustls-native-certs", "rustls-pemfile 1.0.0", "tokio", "tokio-rustls", @@ -7252,6 +7570,7 @@ dependencies = [ "tower-layer", "tower-service", "tracing", + "webpki-roots 0.23.1", ] [[package]] @@ -7261,9 +7580,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease 0.1.9", - "proc-macro2", + "proc-macro2 1.0.67", "prost-build", - "quote", + "quote 1.0.33", "syn 1.0.109", ] @@ -7317,8 +7636,8 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -7430,12 +7749,24 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + [[package]] name = "unicode-width" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" + [[package]] name = "unicode-xid" version = "0.2.3" @@ -7593,8 +7924,8 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", "wasm-bindgen-shared", ] @@ -7617,7 +7948,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote", + "quote 1.0.33", "wasm-bindgen-macro-support", ] @@ -7627,8 +7958,8 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -7650,13 +7981,22 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +dependencies = [ + "rustls-webpki 0.100.3", +] + [[package]] name = "webpki-roots" version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" dependencies = [ - "rustls-webpki", + "rustls-webpki 0.101.4", ] [[package]] @@ -7931,6 +8271,12 @@ dependencies = [ "libc", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "yasna" version = "0.5.0" @@ -7955,8 +8301,8 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] @@ -7975,8 +8321,8 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.67", + "quote 1.0.33", "syn 2.0.37", ] diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 8da4cdc18f..5dcadee626 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -134,7 +134,7 @@ fn execute_transactions( let batch = bank.prepare_batch_for_tests(txs.clone()); let mut timings = ExecuteTimings::default(); let mut mint_decimals = HashMap::new(); - let tx_pre_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals); + let tx_pre_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals, None); let ( TransactionResults { execution_results, .. @@ -154,7 +154,7 @@ fn execute_transactions( &mut timings, None, ); - let tx_post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals); + let tx_post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals, None); izip!( txs.iter(), diff --git a/rpc-client-api/Cargo.toml b/rpc-client-api/Cargo.toml index 92bc9d4958..1ebc304544 100644 --- a/rpc-client-api/Cargo.toml +++ b/rpc-client-api/Cargo.toml @@ -19,6 +19,8 @@ serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } solana-account-decoder = { workspace = true } +solana-accounts-db = { workspace = true } +solana-bundle = { workspace = true } solana-sdk = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } diff --git a/rpc-client-api/src/bundles.rs b/rpc-client-api/src/bundles.rs new file mode 100644 index 0000000000..72afc51d91 --- /dev/null +++ b/rpc-client-api/src/bundles.rs @@ -0,0 +1,166 @@ +//! Use a separate file for Jito related code to minimize upstream merge conflicts. 
+ +use { + crate::config::RpcSimulateTransactionAccountsConfig, + solana_account_decoder::UiAccount, + solana_accounts_db::transaction_results::TransactionExecutionResult, + solana_bundle::{bundle_execution::LoadAndExecuteBundleError, BundleExecutionError}, + solana_sdk::{ + clock::Slot, + commitment_config::{CommitmentConfig, CommitmentLevel}, + signature::Signature, + transaction::TransactionError, + }, + solana_transaction_status::{UiTransactionEncoding, UiTransactionReturnData}, + thiserror::Error, +}; + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub enum RpcBundleSimulationSummary { + /// error and offending transaction signature if applicable + Failed { + error: RpcBundleExecutionError, + tx_signature: Option<String>, + }, + Succeeded, +} + +#[derive(Error, Debug, Clone, Serialize, Deserialize)] +pub enum RpcBundleExecutionError { + #[error("The bank has hit the max allotted time for processing transactions")] + BankProcessingTimeLimitReached, + + #[error("Error locking bundle because a transaction is malformed")] + BundleLockError, + + #[error("Bundle execution timed out")] + BundleExecutionTimeout, + + #[error("The bundle exceeds the cost model")] + ExceedsCostModel, + + #[error("Invalid pre or post accounts")] + InvalidPreOrPostAccounts, + + #[error("PoH record error: {0}")] + PohRecordError(String), + + #[error("Tip payment error: {0}")] + TipError(String), + + #[error("A transaction in the bundle failed to execute: [signature={0}, error={1}]")] + TransactionFailure(Signature, String), +} + +impl From<BundleExecutionError> for RpcBundleExecutionError { + fn from(bundle_execution_error: BundleExecutionError) -> Self { + match bundle_execution_error { + BundleExecutionError::BankProcessingTimeLimitReached => { + Self::BankProcessingTimeLimitReached + } + BundleExecutionError::ExceedsCostModel => Self::ExceedsCostModel, + BundleExecutionError::TransactionFailure(load_and_execute_bundle_error) => { + match load_and_execute_bundle_error { + LoadAndExecuteBundleError::ProcessingTimeExceeded(_) => { + Self::BundleExecutionTimeout + } + LoadAndExecuteBundleError::LockError { + signature, + transaction_error, + } => Self::TransactionFailure(signature, transaction_error.to_string()), + LoadAndExecuteBundleError::TransactionError { + signature, + execution_result, + } => match *execution_result { + TransactionExecutionResult::Executed { details, ..
} => { + let err_msg = if let Err(e) = details.status { + e.to_string() + } else { + "Unknown error".to_string() + }; + Self::TransactionFailure(signature, err_msg) + } + TransactionExecutionResult::NotExecuted(e) => { + Self::TransactionFailure(signature, e.to_string()) + } + }, + LoadAndExecuteBundleError::InvalidPreOrPostAccounts => { + Self::InvalidPreOrPostAccounts + } + } + } + BundleExecutionError::LockError => Self::BundleLockError, + BundleExecutionError::PohRecordError(e) => Self::PohRecordError(e.to_string()), + BundleExecutionError::TipError(e) => Self::TipError(e.to_string()), + } + } +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct RpcSimulateBundleResult { + pub summary: RpcBundleSimulationSummary, + pub transaction_results: Vec<RpcSimulateBundleTransactionResult>, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct RpcSimulateBundleTransactionResult { + pub err: Option<TransactionError>, + pub logs: Option<Vec<String>>, + pub pre_execution_accounts: Option<Vec<UiAccount>>, + pub post_execution_accounts: Option<Vec<UiAccount>>, + pub units_consumed: Option<u64>, + pub return_data: Option<UiTransactionReturnData>, +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcSimulateBundleConfig { + /// Gives the state of accounts pre/post transaction execution. + /// The length of each of these must be equal to the number of transactions. + pub pre_execution_accounts_configs: Vec<Option<RpcSimulateTransactionAccountsConfig>>, + pub post_execution_accounts_configs: Vec<Option<RpcSimulateTransactionAccountsConfig>>, + + /// Specifies the encoding scheme of the contained transactions. + pub transaction_encoding: Option<UiTransactionEncoding>, + + /// Specifies the bank to run simulation against. + pub simulation_bank: Option<SimulationSlotConfig>, + + /// Opt to skip sig-verify for faster performance. + #[serde(default)] + pub skip_sig_verify: bool, + + /// Replace recent blockhash to simulate old transactions without resigning. + #[serde(default)] + pub replace_recent_blockhash: bool, +} + +#[derive(Serialize, Deserialize, Clone, Copy, Debug)] +#[serde(rename_all = "camelCase")] +pub enum SimulationSlotConfig { + /// Simulate on top of bank with the provided commitment. + Commitment(CommitmentConfig), + + /// Simulate on the provided slot's bank. + Slot(Slot), + + /// Simulates on top of the RPC's highest slot's bank i.e. the working bank. + Tip, +} + +impl Default for SimulationSlotConfig { + fn default() -> Self { + Self::Commitment(CommitmentConfig { + commitment: CommitmentLevel::Confirmed, + }) + } +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcBundleRequest { + pub encoded_transactions: Vec<String>, +}

To make the new types concrete, a hypothetical usage sketch of `RpcSimulateBundleConfig` (illustrative values; assumes the `solana-rpc-client-api` and `serde_json` crates as dependencies): one pre/post accounts slot per transaction in the bundle, serialized in the camelCase wire form the serde attributes above produce.

```rust
use solana_rpc_client_api::bundles::{RpcSimulateBundleConfig, SimulationSlotConfig};

fn main() {
    // Config for a hypothetical two-transaction bundle: the accounts-config
    // vectors must match the number of transactions, per the field docs above.
    let config = RpcSimulateBundleConfig {
        simulation_bank: Some(SimulationSlotConfig::Tip),
        pre_execution_accounts_configs: vec![None; 2],
        post_execution_accounts_configs: vec![None; 2],
        skip_sig_verify: true,
        ..RpcSimulateBundleConfig::default()
    };
    let wire = serde_json::to_value(&config).unwrap();
    // rename_all = "camelCase" shows up on the wire.
    assert!(wire.get("skipSigVerify").is_some());
}
```

diff --git a/rpc-client-api/src/config.rs index 9ecff334ca..d16df2d165 100644 --- a/rpc-client-api/src/config.rs +++ b/rpc-client-api/src/config.rs @@ -46,7 +46,7 @@ pub struct RpcSimulateTransactionConfig { pub min_context_slot: Option<Slot>, } -#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcRequestAirdropConfig { pub recent_blockhash: Option<String>, // base-58 encoded blockhash diff --git a/rpc-client-api/src/lib.rs index 6386a433f7..a8c01769a4 100644 --- a/rpc-client-api/src/lib.rs +++ b/rpc-client-api/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::arithmetic_side_effects)] +pub mod bundles; pub mod client_error; pub mod config; pub mod custom_error; diff --git a/rpc-client-api/src/request.rs index e9c6ef9b38..e1e5cfd756 100644 --- a/rpc-client-api/src/request.rs +++ b/rpc-client-api/src/request.rs @@ -113,6 +113,7 @@ pub enum RpcRequest { RequestAirdrop, SendTransaction, SimulateTransaction, + SimulateBundle, SignVote, } @@ -189,6 +190,7 @@ impl fmt::Display for RpcRequest { RpcRequest::RequestAirdrop => "requestAirdrop", RpcRequest::SendTransaction => "sendTransaction", RpcRequest::SimulateTransaction => "simulateTransaction", + RpcRequest::SimulateBundle => "simulateBundle", RpcRequest::SignVote => "signVote", }; @@ -258,6 +260,7 @@ pub enum RpcError { RpcRequestError(String), #[error("RPC response error {code}: {message} {data}")] RpcResponseError { + request_id: u64, code: i64, message: String, data: RpcResponseErrorData, diff --git a/rpc-client-api/src/response.rs index 7591c58c03..e20b92d1fc 100644 --- a/rpc-client-api/src/response.rs +++ b/rpc-client-api/src/response.rs @@ -36,6 +36,7 @@ impl<T> OptionalContext<T> { } } +pub type BatchRpcResult<T> = client_error::Result<Vec<BatchResponse<T>>>; pub type RpcResult<T> = client_error::Result<Response<T>>; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -46,6 +47,15 @@ pub struct RpcResponseContext { pub api_version: Option<RpcApiVersion>, } +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct BatchRpcResponseContext { + #[serde(skip_serializing_if = "Option::is_none")] + pub slot: Option<Slot>, + #[serde(skip_serializing_if = "Option::is_none")] + pub api_version: Option<RpcApiVersion>, +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct RpcApiVersion(semver::Version); @@ -92,6 +102,12 @@ impl RpcResponseContext { } } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BatchResponse<T> { + pub id: u64, + pub result: Response<T>, +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Response<T> { pub context: RpcResponseContext,
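A short sketch of the envelope `BatchResponse<T>` models, using trimmed local stand-ins rather than the real types: the positional `id` assigned at request time is what lets a caller pair each result with its request.

```rust
use serde::Deserialize;
use serde_json::json;

// Trimmed stand-ins for RpcResponseContext / Response<T> / BatchResponse<T>.
#[derive(Debug, Deserialize)]
struct Context { slot: u64 }
#[derive(Debug, Deserialize)]
struct Resp<T> { context: Context, value: T }
#[derive(Debug, Deserialize)]
struct BatchResp<T> { id: u64, result: Resp<T> }

fn main() {
    // Hypothetical batch reply carrying bool payloads.
    let raw = json!([
        { "id": 0, "result": { "context": { "slot": 100 }, "value": true } },
        { "id": 1, "result": { "context": { "slot": 100 }, "value": false } }
    ]);
    let replies: Vec<BatchResp<bool>> = serde_json::from_value(raw).unwrap();
    assert_eq!(replies[0].result.context.slot, 100);
    assert_eq!(replies[1].id, 1);
    assert!(!replies[1].result.value);
}
```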
diff --git a/rpc-client/src/http_sender.rs index 6ef22cc42c..ac30e3e22a 100644 --- a/rpc-client/src/http_sender.rs +++ b/rpc-client/src/http_sender.rs @@ -11,7 +11,7 @@ use { }, solana_rpc_client_api::{ client_error::Result, - custom_error, + custom_error::{self}, error_object::RpcErrorObject, request::{RpcError, RpcRequest, RpcResponseErrorData}, response::RpcSimulateTransactionResult, @@ -82,62 +82,74 @@ impl HttpSender { ); default_headers } -} -struct StatsUpdater<'a> { - stats: &'a RwLock<RpcTransportStats>, - request_start_time: Instant, - rate_limited_time: Duration, -} + fn check_response(json: &serde_json::Value) -> Result<()> { + if json["error"].is_object() { + return match serde_json::from_value::<RpcErrorObject>(json["error"].clone()) { + Ok(rpc_error_object) => { + let data = match rpc_error_object.code { + custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE => { + match serde_json::from_value::<RpcSimulateTransactionResult>( + json["error"]["data"].clone(), + ) { + Ok(data) => { + RpcResponseErrorData::SendTransactionPreflightFailure(data) + } + Err(err) => { + debug!( + "Failed to deserialize RpcSimulateTransactionResult: {:?}", + err + ); + RpcResponseErrorData::Empty + } + } + } + custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY => { + match serde_json::from_value::<custom_error::NodeUnhealthyErrorData>( + json["error"]["data"].clone(), + ) { + Ok(custom_error::NodeUnhealthyErrorData { num_slots_behind }) => { + RpcResponseErrorData::NodeUnhealthy { num_slots_behind } + } + Err(_err) => RpcResponseErrorData::Empty, + } + } + _ => RpcResponseErrorData::Empty, + }; -impl<'a> StatsUpdater<'a> { - fn new(stats: &'a RwLock<RpcTransportStats>) -> Self { - Self { - stats, - request_start_time: Instant::now(), - rate_limited_time: Duration::default(), + Err(RpcError::RpcResponseError { + request_id: json["id"].as_u64().unwrap(), + code: rpc_error_object.code, + message: rpc_error_object.message, + data, + } + .into()) + } + Err(err) => Err(RpcError::RpcRequestError(format!( + "Failed to deserialize RPC error response: {} [{}]", + serde_json::to_string(&json["error"]).unwrap(), + err + )) + .into()), + }; } + Ok(()) } - fn add_rate_limited_time(&mut self, duration: Duration) { - self.rate_limited_time += duration; - } -} - -impl<'a> Drop for StatsUpdater<'a> { - fn drop(&mut self) { - let mut stats = self.stats.write().unwrap(); - stats.request_count += 1; - stats.elapsed_time += Instant::now().duration_since(self.request_start_time); - stats.rate_limited_time += self.rate_limited_time; - } -} - -#[async_trait] -impl RpcSender for HttpSender { - fn get_transport_stats(&self) -> RpcTransportStats { - self.stats.read().unwrap().clone() - } - - async fn send( + async fn do_send_with_retry( &self, - request: RpcRequest, - params: serde_json::Value, - ) -> Result<serde_json::Value> { + request: serde_json::Value, + ) -> reqwest::Result<serde_json::Value> { let mut stats_updater = StatsUpdater::new(&self.stats); - - let request_id = self.request_id.fetch_add(1, Ordering::Relaxed); - let request_json = request.build_request_json(request_id, params).to_string(); - let mut too_many_requests_retries = 5; loop { let response = { let client = self.client.clone(); - let request_json = request_json.clone(); + let request = request.to_string(); client .post(&self.url) .header(CONTENT_TYPE, "application/json") - .body(request_json) + .body(request) .send() .await }?; @@ -165,54 +177,81 @@ impl RpcSender for HttpSender { sleep(duration).await; stats_updater.add_rate_limited_time(duration); + continue; } - return Err(response.error_for_status().unwrap_err().into()); - } let mut json = response.json::<serde_json::Value>().await?; - if json["error"].is_object() { - return match serde_json::from_value::<RpcErrorObject>(json["error"].clone()) { - Ok(rpc_error_object) => { - let data = match rpc_error_object.code { - custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE => { - match serde_json::from_value::<RpcSimulateTransactionResult>(json["error"]["data"].clone()) { - Ok(data) =>
RpcResponseErrorData::SendTransactionPreflightFailure(data), - Err(err) => { - debug!("Failed to deserialize RpcSimulateTransactionResult: {:?}", err); - RpcResponseErrorData::Empty - } - } - }, - custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY => { - match serde_json::from_value::<custom_error::NodeUnhealthyErrorData>(json["error"]["data"].clone()) { - Ok(custom_error::NodeUnhealthyErrorData {num_slots_behind}) => RpcResponseErrorData::NodeUnhealthy {num_slots_behind}, - Err(_err) => { - RpcResponseErrorData::Empty - } - } - }, - _ => RpcResponseErrorData::Empty - }; - - Err(RpcError::RpcResponseError { - code: rpc_error_object.code, - message: rpc_error_object.message, - data, - } - .into()) - } - Err(err) => Err(RpcError::RpcRequestError(format!( - "Failed to deserialize RPC error response: {} [{}]", - serde_json::to_string(&json["error"]).unwrap(), - err - )) - .into()), - }; } - return Ok(json["result"].take()); + + return response.json::<serde_json::Value>().await; } } +} + +struct StatsUpdater<'a> { + stats: &'a RwLock<RpcTransportStats>, + request_start_time: Instant, + rate_limited_time: Duration, +} + +impl<'a> StatsUpdater<'a> { + fn new(stats: &'a RwLock<RpcTransportStats>) -> Self { + Self { + stats, + request_start_time: Instant::now(), + rate_limited_time: Duration::default(), + } + } + + fn add_rate_limited_time(&mut self, duration: Duration) { + self.rate_limited_time += duration; + } +} + +impl<'a> Drop for StatsUpdater<'a> { + fn drop(&mut self) { + let mut stats = self.stats.write().unwrap(); + stats.request_count += 1; + stats.elapsed_time += Instant::now().duration_since(self.request_start_time); + stats.rate_limited_time += self.rate_limited_time; + } +} + +#[async_trait] +impl RpcSender for HttpSender { + async fn send( + &self, + request: RpcRequest, + params: serde_json::Value, + ) -> Result<serde_json::Value> { + let request_id = self.request_id.fetch_add(1, Ordering::Relaxed); + let request = request.build_request_json(request_id, params); + let mut resp = self.do_send_with_retry(request).await?; + Self::check_response(&resp)?; + + Ok(resp["result"].take()) + } + + async fn send_batch( + &self, + requests_and_params: Vec<(RpcRequest, serde_json::Value)>, + ) -> Result<serde_json::Value> { + let mut batch_request = vec![]; + for (request_id, req) in requests_and_params.into_iter().enumerate() { + batch_request.push(req.0.build_request_json(request_id as u64, req.1)); + } + + let resp = self + .do_send_with_retry(serde_json::Value::Array(batch_request)) + .await?; + + Ok(resp) + } + + fn get_transport_stats(&self) -> RpcTransportStats { + self.stats.read().unwrap().clone() + } fn url(&self) -> String { self.url.clone()
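The batching in `send_batch` above boils down to assigning request ids by position. A standalone sketch (hypothetical helper; only `serde_json` assumed) of the JSON-RPC 2.0 batch body it posts:

```rust
use serde_json::{json, Value};

// Build a JSON-RPC 2.0 batch body: one object per request, id = position,
// mirroring the enumerate-and-build loop in HttpSender::send_batch.
fn build_batch_body(requests: Vec<(&str, Value)>) -> Value {
    let batch: Vec<Value> = requests
        .into_iter()
        .enumerate()
        .map(|(id, (method, params))| {
            json!({ "jsonrpc": "2.0", "id": id as u64, "method": method, "params": params })
        })
        .collect();
    Value::Array(batch)
}

fn main() {
    let body = build_batch_body(vec![
        ("simulateBundle", json!([["txA"]])),
        ("simulateBundle", json!([["txB"]])),
    ]);
    assert_eq!(body[1]["id"], 1);
}
```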
diff --git a/rpc-client/src/mock_sender.rs index 654f45d029..d710735dbc 100644 --- a/rpc-client/src/mock_sender.rs +++ b/rpc-client/src/mock_sender.rs @@ -489,4 +489,11 @@ impl RpcSender for MockSender { fn url(&self) -> String { format!("MockSender: {}", self.url) } + + async fn send_batch( + &self, + _requests_and_params: Vec<(RpcRequest, serde_json::Value)>, + ) -> Result<serde_json::Value> { + todo!() + } } diff --git a/rpc-client/src/nonblocking/rpc_client.rs index 21350938a7..145c3417e0 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -33,6 +33,10 @@ use { UiAccount, UiAccountData, UiAccountEncoding, }, solana_rpc_client_api::{ + bundles::{ + RpcBundleRequest, RpcSimulateBundleConfig, RpcSimulateBundleResult, + SimulationSlotConfig, + }, client_error::{ Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult, }, @@ -43,6 +47,7 @@ }, solana_sdk::{ account::Account, + bundle::VersionedBundle, clock::{Epoch, Slot, UnixTimestamp, DEFAULT_MS_PER_SLOT}, commitment_config::{CommitmentConfig, CommitmentLevel}, epoch_info::EpochInfo, @@ -51,7 +56,7 @@ hash::Hash, pubkey::Pubkey, signature::Signature, - transaction, + transaction::{self, VersionedTransaction}, }, solana_transaction_status::{ EncodedConfirmedBlock, EncodedConfirmedTransactionWithStatusMeta, TransactionStatus, @@ -970,6 +975,7 @@ impl RpcClient { code, message, data, + .. }) = &err.kind { debug!("{} {}", code, message); @@ -1412,6 +1418,113 @@ impl RpcClient { .await } + pub async fn batch_simulate_bundle( + &self, + bundles: &[VersionedBundle], + ) -> BatchRpcResult<RpcSimulateBundleResult> { + let configs = bundles + .iter() + .map(|b| RpcSimulateBundleConfig { + simulation_bank: Some(SimulationSlotConfig::Commitment(self.commitment())), + pre_execution_accounts_configs: vec![None; b.transactions.len()], + post_execution_accounts_configs: vec![None; b.transactions.len()], + ..RpcSimulateBundleConfig::default() + }) + .collect::<Vec<_>>(); + + self.batch_simulate_bundle_with_config(bundles.iter().zip(configs).collect()) + .await + } + + pub async fn batch_simulate_bundle_with_config( + &self, + bundles_and_configs: Vec<(&VersionedBundle, RpcSimulateBundleConfig)>, + ) -> BatchRpcResult<RpcSimulateBundleResult> { + let mut params = vec![]; + for (bundle, config) in bundles_and_configs { + let transaction_encoding = if let Some(encoding) = config.transaction_encoding { + encoding + } else { + self.default_cluster_transaction_encoding().await? + }; + + let simulation_bank = config.simulation_bank.unwrap_or_default(); + + let config = RpcSimulateBundleConfig { + transaction_encoding: Some(transaction_encoding), + simulation_bank: Some(simulation_bank), + ..config + }; + + let encoded_transactions = bundle + .transactions + .iter() + .map(|tx| serialize_and_encode::<VersionedTransaction>(tx, transaction_encoding)) + .collect::<Result<Vec<String>, ClientError>>()?; + let rpc_bundle_request = RpcBundleRequest { + encoded_transactions, + }; + + params.push(json!([rpc_bundle_request, config])); + } + + let requests_and_params = vec![RpcRequest::SimulateBundle; params.len()] + .into_iter() + .zip(params) + .collect(); + self.send_batch(requests_and_params).await + } + + pub async fn simulate_bundle( + &self, + bundle: &VersionedBundle, + ) -> RpcResult<RpcSimulateBundleResult> { + self.simulate_bundle_with_config( + bundle, + RpcSimulateBundleConfig { + simulation_bank: Some(SimulationSlotConfig::Commitment(self.commitment())), + pre_execution_accounts_configs: vec![None; bundle.transactions.len()], + post_execution_accounts_configs: vec![None; bundle.transactions.len()], + ..RpcSimulateBundleConfig::default() + }, + ) + .await + } + + pub async fn simulate_bundle_with_config( + &self, + bundle: &VersionedBundle, + config: RpcSimulateBundleConfig, + ) -> RpcResult<RpcSimulateBundleResult> { + let transaction_encoding = if let Some(enc) = config.transaction_encoding { + enc + } else { + self.default_cluster_transaction_encoding().await?
+ }; + let simulation_bank = Some(config.simulation_bank.unwrap_or_default()); + + let encoded_transactions = bundle + .transactions + .iter() + .map(|tx| serialize_and_encode::<VersionedTransaction>(tx, transaction_encoding)) + .collect::<ClientResult<Vec<String>>>()?; + let rpc_bundle_request = RpcBundleRequest { + encoded_transactions, + }; + + let config = RpcSimulateBundleConfig { + transaction_encoding: Some(transaction_encoding), + simulation_bank, + ..config + }; + + self.send( + RpcRequest::SimulateBundle, + json!([rpc_bundle_request, config]), + ) + .await + } + /// Returns the highest slot information that the node has snapshots for. /// /// This will find the highest full snapshot slot, and the highest incremental snapshot slot @@ -5375,6 +5488,22 @@ impl RpcClient { .map_err(|err| ClientError::new_with_request(err.into(), request)) } + pub async fn send_batch<T>( + &self, + requests_and_params: Vec<(RpcRequest, Value)>, + ) -> ClientResult<T> + where + T: serde::de::DeserializeOwned, + { + let response = self.sender.send_batch(requests_and_params).await?; + debug!("response: {:?}", response); + + serde_json::from_value(response).map_err(|err| ClientError { + request: None, + kind: err.into(), + }) + } + pub fn get_transport_stats(&self) -> RpcTransportStats { self.sender.get_transport_stats() }
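A hypothetical end-to-end sketch of the new nonblocking API (variable names and error handling are illustrative; assumes `VersionedBundle` exposes a public `transactions` field, as the client code above suggests):

```rust
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_rpc_client_api::bundles::{RpcSimulateBundleConfig, SimulationSlotConfig};
use solana_sdk::{bundle::VersionedBundle, transaction::VersionedTransaction};

// Simulate a bundle of already-signed transactions on the working bank.
async fn simulate(
    rpc: &RpcClient,
    transactions: Vec<VersionedTransaction>,
) -> solana_rpc_client_api::client_error::Result<()> {
    let bundle = VersionedBundle { transactions };
    let config = RpcSimulateBundleConfig {
        simulation_bank: Some(SimulationSlotConfig::Tip),
        pre_execution_accounts_configs: vec![None; bundle.transactions.len()],
        post_execution_accounts_configs: vec![None; bundle.transactions.len()],
        ..RpcSimulateBundleConfig::default()
    };
    let response = rpc.simulate_bundle_with_config(&bundle, config).await?;
    println!("summary: {:?}", response.value.summary);
    Ok(())
}
```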
/// /// This will find the highest full snapshot slot, and the highest incremental snapshot slot diff --git a/rpc-client/src/rpc_sender.rs b/rpc-client/src/rpc_sender.rs index 948ac45a46..6a357b4e6b 100644 --- a/rpc-client/src/rpc_sender.rs +++ b/rpc-client/src/rpc_sender.rs @@ -31,6 +31,10 @@ pub trait RpcSender { request: RpcRequest, params: serde_json::Value, ) -> Result<serde_json::Value>; + async fn send_batch( + &self, + requests_and_params: Vec<(RpcRequest, serde_json::Value)>, + ) -> Result<serde_json::Value>; fn get_transport_stats(&self) -> RpcTransportStats; fn url(&self) -> String; } diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml index eb69e4c95d..ea8d766641 100644 --- a/rpc-test/Cargo.toml +++ b/rpc-test/Cargo.toml @@ -33,6 +33,7 @@ solana-transaction-status = { workspace = true } tokio = { workspace = true, features = ["full"] } [dev-dependencies] +serial_test = { workspace = true } solana-logger = { workspace = true } [package.metadata.docs.rs] diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index f1c2d4acb9..e7f85e2285 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -5,6 +5,7 @@ use { log::*, reqwest::{self, header::CONTENT_TYPE}, serde_json::{json, Value}, + serial_test::serial, solana_account_decoder::UiAccount, solana_client::{ connection_cache::ConnectionCache, @@ -241,6 +242,7 @@ fn test_rpc_slot_updates() { } #[test] +#[serial] // run serially so the test does not contend with other tests for local resources fn test_rpc_subscriptions() { solana_logger::setup(); diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 3edd4b3800..292cef2365 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -31,6 +31,7 @@ serde_json = { workspace = true } soketto = { workspace = true } solana-account-decoder = { workspace = true } solana-accounts-db = { workspace = true } +solana-bundle = { workspace = true } solana-client = { workspace = true } solana-entry = { workspace = true } solana-faucet = { workspace = true } @@ -40,6 +41,7 @@ solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-perf = { workspace = true } solana-poh = { workspace = true } +solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index d7d0d8b6cc..8f861b6f77 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -230,6 +230,13 @@ impl JsonRpcRequestProcessor { Ok(bank) } + fn bank_from_slot(&self, slot: Slot) -> Option<Arc<Bank>> { + debug!("Slot: {:?}", slot); + + let r_bank_forks = self.bank_forks.read().unwrap(); + r_bank_forks.get(slot) + } + #[allow(deprecated)] fn bank(&self, commitment: Option<CommitmentConfig>) -> Arc<Bank> { debug!("RPC commitment_config: {:?}", commitment); @@ -361,13 +368,10 @@ impl JsonRpcRequestProcessor { ); ClusterInfo::new(contact_info, keypair, socket_addr_space) }); - let tpu_address = cluster_info - .my_contact_info() - .tpu(connection_cache.protocol()) - .unwrap(); + let (sender, receiver) = unbounded(); SendTransactionService::new::<NullTpuInfo>( - tpu_address, + cluster_info.clone(), &bank_forks, None, receiver, @@ -2652,13 +2656,16 @@ pub mod rpc_minimal { }) .unwrap(); - let full_snapshot_slot = - snapshot_utils::get_highest_full_snapshot_archive_slot(full_snapshot_archives_dir) - .ok_or(RpcCustomError::NoSnapshot)?; + let full_snapshot_slot = snapshot_utils::get_highest_full_snapshot_archive_slot( + full_snapshot_archives_dir, + None, + ) + .ok_or(RpcCustomError::NoSnapshot)?; let incremental_snapshot_slot = snapshot_utils::get_highest_incremental_snapshot_archive_slot(
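The new `send_batch` maps directly onto JSON-RPC 2.0 batching: one request object per `(RpcRequest, Value)` pair, sent as a single JSON array. A sketch of the assumed wire shape for a two-bundle `batch_simulate_bundle` call (ids are illustrative; each `params` array, elided here, carries an encoded bundle and its config):

    use serde_json::json;

    fn example_batch_body() -> serde_json::Value {
        json!([
            { "jsonrpc": "2.0", "id": 0, "method": "simulateBundle", "params": [] },
            { "jsonrpc": "2.0", "id": 1, "method": "simulateBundle", "params": [] }
        ])
    }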
incremental_snapshot_archives_dir, full_snapshot_slot, + None, ); Ok(RpcSnapshotSlotInfo { @@ -3264,13 +3271,168 @@ pub mod rpc_accounts_scan { } } +pub mod utils { + use { + crate::rpc::encode_account, + jsonrpc_core::Error, + solana_account_decoder::{UiAccount, UiAccountEncoding}, + solana_bundle::{ + bundle_execution::{LoadAndExecuteBundleError, LoadAndExecuteBundleOutput}, + BundleExecutionError, + }, + solana_rpc_client_api::{ + bundles::{ + RpcBundleExecutionError, RpcBundleSimulationSummary, RpcSimulateBundleConfig, + RpcSimulateBundleResult, RpcSimulateBundleTransactionResult, + }, + config::RpcSimulateTransactionAccountsConfig, + }, + solana_sdk::{account::AccountSharedData, pubkey::Pubkey}, + std::str::FromStr, + }; + + /// Encodes the accounts; returns an error if any account fails to encode. + /// Ok(None) means no accounts were passed in. + fn try_encode_accounts( + accounts: &Option<Vec<(Pubkey, AccountSharedData)>>, + encoding: UiAccountEncoding, + ) -> Result<Option<Vec<UiAccount>>, Error> { + if let Some(accounts) = accounts { + Ok(Some( + accounts + .iter() + .map(|(pubkey, account)| encode_account(account, pubkey, encoding, None)) + .collect::<Result<Vec<UiAccount>, Error>>()?, + )) + } else { + Ok(None) + } + } + + pub fn rpc_bundle_result_from_bank_result( + bundle_execution_result: LoadAndExecuteBundleOutput, + rpc_config: RpcSimulateBundleConfig, + ) -> Result<RpcSimulateBundleResult, Error> { + let summary = match bundle_execution_result.result() { + Ok(_) => RpcBundleSimulationSummary::Succeeded, + Err(e) => { + let tx_signature = match e { + LoadAndExecuteBundleError::TransactionError { signature, .. } + | LoadAndExecuteBundleError::LockError { signature, .. } => { + Some(signature.to_string()) + } + _ => None, + }; + RpcBundleSimulationSummary::Failed { + error: RpcBundleExecutionError::from(BundleExecutionError::TransactionFailure( + e.clone(), + )), + tx_signature, + } + } + }; + + let mut transaction_results = Vec::new(); + for bundle_output in bundle_execution_result.bundle_transaction_results() { + for (index, execution_result) in bundle_output + .execution_results() + .iter() + .enumerate() + .filter(|(_, result)| result.was_executed()) + { + // results are filtered by was_executed, so it is safe to unwrap here + let result = execution_result.flattened_result(); + let details = execution_result.details().unwrap(); + + let account_config = rpc_config + .pre_execution_accounts_configs + .get(transaction_results.len()) + .ok_or_else(|| Error::invalid_params("the length of pre_execution_accounts_configs must match the number of transactions"))?; + let account_encoding = account_config + .as_ref() + .and_then(|config| config.encoding) + .unwrap_or(UiAccountEncoding::Base64); + + let pre_execution_accounts = if let Some(pre_tx_accounts) = + bundle_output.pre_tx_execution_accounts().get(index) + { + try_encode_accounts(pre_tx_accounts, account_encoding)?
+ } else { + None + }; + + transaction_results.push(RpcSimulateBundleTransactionResult { + err: match result { + Ok(_) => None, + Err(e) => Some(e), + }, + logs: details.log_messages.clone(), + pre_execution_accounts, + post_execution_accounts, + units_consumed: Some(details.executed_units), + return_data: details.return_data.clone().map(|data| data.into()), + }); + } + } + + Ok(RpcSimulateBundleResult { + summary, + transaction_results, + }) + } + + pub fn account_configs_to_accounts( + accounts_config: &[Option<RpcSimulateTransactionAccountsConfig>], + ) -> Result<Vec<Option<Vec<Pubkey>>>, Error> { + let mut execution_accounts = Vec::new(); + for account_config in accounts_config { + let accounts = match account_config { + None => None, + Some(account_config) => Some( + account_config + .addresses + .iter() + .map(|a| { + Pubkey::from_str(a).map_err(|_| { + Error::invalid_params(format!("invalid pubkey provided: {}", a)) + }) + }) + .collect::<Result<Vec<Pubkey>, Error>>()?, + ), + }; + execution_accounts.push(accounts); + } + Ok(execution_accounts) + } +} // Full RPC interface that an API node is expected to provide // (rpc_minimal should also be provided by an API node) pub mod rpc_full { use { super::*, - solana_sdk::message::{SanitizedVersionedMessage, VersionedMessage}, + crate::rpc::utils::{account_configs_to_accounts, rpc_bundle_result_from_bank_result}, + jsonrpc_core::ErrorCode, + solana_bundle::bundle_execution::{load_and_execute_bundle, LoadAndExecuteBundleError}, + solana_rpc_client_api::bundles::{ + RpcBundleRequest, RpcSimulateBundleConfig, RpcSimulateBundleResult, + SimulationSlotConfig, + }, + solana_sdk::{ + bundle::{derive_bundle_id, SanitizedBundle}, + clock::MAX_PROCESSING_AGE, + message::{SanitizedVersionedMessage, VersionedMessage}, + }, }; + #[rpc] pub trait Full { type Metadata; @@ -3332,6 +3494,14 @@ pub mod rpc_full { config: Option<RpcSimulateTransactionConfig>, ) -> Result<RpcResponse<RpcSimulateTransactionResult>>; + #[rpc(meta, name = "simulateBundle")] + fn simulate_bundle( + &self, + meta: Self::Metadata, + rpc_bundle_request: RpcBundleRequest, + config: Option<RpcSimulateBundleConfig>, + ) -> Result<RpcResponse<RpcSimulateBundleResult>>; + #[rpc(meta, name = "minimumLedgerSlot")] fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot>; @@ -3825,6 +3995,146 @@ pub mod rpc_full { )) } + // TODO (LB): probably want to add a max transaction size and max account return size and max + // allowable simulation time + fn simulate_bundle( + &self, + meta: Self::Metadata, + rpc_bundle_request: RpcBundleRequest, + config: Option<RpcSimulateBundleConfig>, + ) -> Result<RpcResponse<RpcSimulateBundleResult>> { + const MAX_BUNDLE_SIMULATION_TIME: Duration = Duration::from_millis(500); + + debug!("simulate_bundle rpc request received"); + + let config = config.unwrap_or_else(|| RpcSimulateBundleConfig { + pre_execution_accounts_configs: vec![ + None; + rpc_bundle_request.encoded_transactions.len() + ], + post_execution_accounts_configs: vec![ + None; + rpc_bundle_request.encoded_transactions.len() + ], + ..RpcSimulateBundleConfig::default() + }); + + // Run some request validations + if !(config.pre_execution_accounts_configs.len() + == rpc_bundle_request.encoded_transactions.len() + && config.post_execution_accounts_configs.len() + == rpc_bundle_request.encoded_transactions.len()) + { + return Err(Error::invalid_params( + "pre/post_execution_accounts_configs must be equal in length to the number of transactions", + )); + } + + let bank = match config.simulation_bank.unwrap_or_default() { + SimulationSlotConfig::Commitment(commitment) => Ok(meta.bank(Some(commitment))), + SimulationSlotConfig::Slot(slot) => meta.bank_from_slot(slot).ok_or_else(|| { + Error::invalid_params(format!("bank not found for the provided slot: {}", slot)) +
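This match supports three bank selections for simulation. A client-side sketch of constructing each (slot value illustrative):

    use solana_rpc_client_api::bundles::{RpcSimulateBundleConfig, SimulationSlotConfig};
    use solana_sdk::commitment_config::CommitmentConfig;

    fn bank_selection_examples() -> [RpcSimulateBundleConfig; 3] {
        [
            // 1) A bank at a given commitment level (the client-side default).
            RpcSimulateBundleConfig {
                simulation_bank: Some(SimulationSlotConfig::Commitment(
                    CommitmentConfig::confirmed(),
                )),
                ..RpcSimulateBundleConfig::default()
            },
            // 2) An explicit slot; errors if bank_forks no longer holds that bank.
            RpcSimulateBundleConfig {
                simulation_bank: Some(SimulationSlotConfig::Slot(150_000_000)),
                ..RpcSimulateBundleConfig::default()
            },
            // 3) The working bank ("tip"): the freshest, possibly unfrozen state.
            RpcSimulateBundleConfig {
                simulation_bank: Some(SimulationSlotConfig::Tip),
                ..RpcSimulateBundleConfig::default()
            },
        ]
    }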
}), + SimulationSlotConfig::Tip => Ok(meta.bank_forks.read().unwrap().working_bank()), + }?; + + let tx_encoding = config + .transaction_encoding + .unwrap_or(UiTransactionEncoding::Base64); + let binary_encoding = tx_encoding.into_binary_encoding().ok_or_else(|| { + Error::invalid_params(format!( + "Unsupported encoding: {}. Supported encodings are: base58 & base64", + tx_encoding + )) + })?; + let mut decoded_transactions = rpc_bundle_request + .encoded_transactions + .into_iter() + .map(|encoded_tx| { + decode_and_deserialize::<VersionedTransaction>(encoded_tx, binary_encoding) + .map(|de| de.1) + }) + .collect::<Result<Vec<VersionedTransaction>>>()?; + + if config.replace_recent_blockhash { + if !config.skip_sig_verify { + return Err(Error::invalid_params( + "sigVerify may not be used with replaceRecentBlockhash", + )); + } + decoded_transactions.iter_mut().for_each(|tx| { + tx.message.set_recent_blockhash(bank.last_blockhash()); + }); + } + + let bundle_id = derive_bundle_id(&decoded_transactions); + let sanitized_bundle = SanitizedBundle { + transactions: decoded_transactions + .into_iter() + .map(|tx| sanitize_transaction(tx, bank.as_ref())) + .collect::<Result<Vec<SanitizedTransaction>>>()?, + bundle_id, + }; + + if !config.skip_sig_verify { + for tx in &sanitized_bundle.transactions { + verify_transaction(tx, &bank.feature_set)?; + } + } + + let pre_execution_accounts = + account_configs_to_accounts(&config.pre_execution_accounts_configs)?; + let post_execution_accounts = + account_configs_to_accounts(&config.post_execution_accounts_configs)?; + + let bundle_execution_result = load_and_execute_bundle( + &bank, + &sanitized_bundle, + MAX_PROCESSING_AGE, + &MAX_BUNDLE_SIMULATION_TIME, + true, + true, + true, + true, + &None, + true, + None, + &pre_execution_accounts, + &post_execution_accounts, + ); + + // only return error if irrecoverable (timeout or tx malformed) + // bundle execution failures w/ context are returned to client + match bundle_execution_result.result() { + Ok(()) | Err(LoadAndExecuteBundleError::TransactionError { ..
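Only the irrecoverable cases in this match surface as JSON-RPC errors; per-transaction failures come back inside the result. A sketch of the assumed error body a client sees on simulation timeout, given the `ErrorCode::ServerError(10_000)` mapping here (elapsed value illustrative):

    use serde_json::json;

    fn example_timeout_error() -> serde_json::Value {
        json!({
            "jsonrpc": "2.0",
            "id": 1,
            "error": {
                // ErrorCode::ServerError(10_000) serializes to its raw code.
                "code": 10_000,
                "message": "simulation time exceeded max allowed time: 512ms"
            }
        })
    }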
}) => {} + Err(LoadAndExecuteBundleError::ProcessingTimeExceeded(elapsed)) => { + let mut error = Error::new(ErrorCode::ServerError(10_000)); + error.message = format!( + "simulation time exceeded max allowed time: {:?}ms", + elapsed.as_millis() + ); + return Err(error); + } + Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts) => { + return Err(Error::invalid_params("invalid pre or post account data")); + } + Err(LoadAndExecuteBundleError::LockError { + signature, + transaction_error, + }) => { + return Err(Error::invalid_params(format!( + "error locking transaction with signature: {}, error: {:?}", + signature, transaction_error + ))); + } + } + + let rpc_bundle_result = + rpc_bundle_result_from_bank_result(bundle_execution_result, config)?; + + Ok(new_response(&bank, rpc_bundle_result)) + } + fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result { debug!("minimum_ledger_slot rpc request received"); meta.minimum_ledger_slot() @@ -4155,6 +4465,7 @@ pub mod rpc_deprecated_v1_9 { .and_then(|snapshot_config| { snapshot_utils::get_highest_full_snapshot_archive_slot( snapshot_config.full_snapshot_archives_dir, + None, ) }) .ok_or_else(|| RpcCustomError::NoSnapshot.into()) @@ -4638,6 +4949,7 @@ pub mod tests { }, rpc_subscriptions::RpcSubscriptions, }, + base64::engine::general_purpose, bincode::deserialize, jsonrpc_core::{futures, ErrorCode, MetaIoHandler, Output, Response, Value}, jsonrpc_core_client::transports::local, @@ -5849,6 +6161,146 @@ pub mod tests { assert_eq!(result.len(), 0); } + #[test] + fn test_rpc_simulate_bundle_happy_path() { + // 1. setup + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + + let recent_blockhash = bank.confirmed_last_blockhash(); + let RpcHandler { + ref meta, ref io, .. + } = rpc; + + let data_len = 100; + let lamports = bank.get_minimum_balance_for_rent_exemption(data_len); + let leader_pubkey = solana_sdk::pubkey::new_rand(); + let leader_account_data = AccountSharedData::new(lamports, data_len, &system_program::id()); + bank.store_account(&leader_pubkey, &leader_account_data); + bank.freeze(); + + // 2. build bundle + + // let's pretend the RPC keypair is a searcher + let searcher_keypair = rpc.mint_keypair; + + // create tip tx + let tip_amount = 10000; + let tip_tx = VersionedTransaction::from(system_transaction::transfer( + &searcher_keypair, + &leader_pubkey, + tip_amount, + recent_blockhash, + )); + + // some random mev tx + let mev_amount = 20000; + let goku_pubkey = solana_sdk::pubkey::new_rand(); + let mev_tx = VersionedTransaction::from(system_transaction::transfer( + &searcher_keypair, + &goku_pubkey, + mev_amount, + recent_blockhash, + )); + + let encoded_mev_tx = general_purpose::STANDARD.encode(serialize(&mev_tx).unwrap()); + let encoded_tip_tx = general_purpose::STANDARD.encode(serialize(&tip_tx).unwrap()); + let b64_data = general_purpose::STANDARD.encode(leader_account_data.data()); + + // 3. 
test and assert + let skip_sig_verify = true; + let replace_recent_blockhash = false; + let expected_response = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": bank.slot(), "apiVersion": RpcApiVersion::default()}, + "value":{ + "summary": "succeeded", + "transactionResults": [ + { + "err": null, + "logs": ["Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success"], + "returnData": null, + "unitsConsumed": 150, + "postExecutionAccounts": [], + "preExecutionAccounts": [ + { + "data": [b64_data, "base64"], + "executable": false, + "lamports": leader_account_data.lamports(), + "owner": "11111111111111111111111111111111", + "rentEpoch": 0, + "space": 100 + } + ], + }, + { + "err": null, + "logs": ["Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success"], + "returnData": null, + "unitsConsumed": 150, + "preExecutionAccounts": [], + "postExecutionAccounts": [ + { + "data": [b64_data, "base64"], + "executable": false, + "lamports": leader_account_data.lamports() + tip_amount, + "owner": "11111111111111111111111111111111", + "rentEpoch": u64::MAX, + "space": 100 + } + ], + }, + ], + } + }, + "id": 1, + }); + + let request = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateBundle", + "params":[ + {{ + "encodedTransactions": ["{}", "{}"] + }}, + {{ + "skipSigVerify": {}, + "replaceRecentBlockhash": {}, + "slot": {}, + "preExecutionAccountsConfigs": [ + {{ "encoding": "base64", "addresses": ["{}"] }}, + {{ "encoding": "base64", "addresses": [] }} + ], + "postExecutionAccountsConfigs": [ + {{ "encoding": "base64", "addresses": [] }}, + {{ "encoding": "base64", "addresses": ["{}"] }} + ] + }} + ] + }}"#, + encoded_mev_tx, + encoded_tip_tx, + skip_sig_verify, + replace_recent_blockhash, + bank.slot(), + leader_pubkey, + leader_pubkey, + ); + + let actual_response = io + .handle_request_sync(&request, meta.clone()) + .expect("response"); + + let expected_response = serde_json::from_value::(expected_response) + .expect("expected_response deserialization"); + let actual_response = serde_json::from_str::(&actual_response) + .expect("actual_response deserialization"); + + assert_eq!(expected_response, actual_response); + } + #[test] fn test_rpc_simulate_transaction() { let rpc = RpcHandler::start(); @@ -6575,10 +7027,7 @@ pub mod tests { ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified) }); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); - let tpu_address = cluster_info - .my_contact_info() - .tpu(connection_cache.protocol()) - .unwrap(); + let (meta, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, @@ -6587,7 +7036,7 @@ pub mod tests { blockstore, validator_exit, health.clone(), - cluster_info, + cluster_info.clone(), Hash::default(), None, optimistically_confirmed_bank, @@ -6599,7 +7048,7 @@ pub mod tests { Arc::new(PrioritizationFeeCache::default()), ); SendTransactionService::new::( - tpu_address, + cluster_info, &bank_forks, None, receiver, @@ -6847,12 +7296,9 @@ pub mod tests { let cluster_info = Arc::new(new_test_cluster_info()); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); - let tpu_address = cluster_info - .my_contact_info() - .tpu(connection_cache.protocol()) - .unwrap(); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let (request_processor, receiver) = JsonRpcRequestProcessor::new( 
JsonRpcConfig::default(), None, @@ -6861,7 +7307,7 @@ pub mod tests { blockstore.clone(), validator_exit, RpcHealth::stub(optimistically_confirmed_bank.clone(), blockstore), - cluster_info, + cluster_info.clone(), Hash::default(), None, optimistically_confirmed_bank, @@ -6873,7 +7319,7 @@ pub mod tests { Arc::new(PrioritizationFeeCache::default()), ); SendTransactionService::new::( - tpu_address, + cluster_info, &bank_forks, None, receiver, diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 329de948e2..94c54ac083 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -255,6 +255,7 @@ impl RequestMiddleware for RpcRequestMiddleware { let full_snapshot_archive_info = snapshot_utils::get_highest_full_snapshot_archive_info( &snapshot_config.full_snapshot_archives_dir, + None, ); let snapshot_archive_info = if let Some(full_snapshot_archive_info) = full_snapshot_archive_info { @@ -264,6 +265,7 @@ impl RequestMiddleware for RpcRequestMiddleware { snapshot_utils::get_highest_incremental_snapshot_archive_info( &snapshot_config.incremental_snapshot_archives_dir, full_snapshot_archive_info.slot(), + None, ) .map(|incremental_snapshot_archive_info| { incremental_snapshot_archive_info @@ -377,11 +379,6 @@ impl JsonRpcService { LARGEST_ACCOUNTS_CACHE_DURATION, ))); - let tpu_address = cluster_info - .my_contact_info() - .tpu(connection_cache.protocol()) - .map_err(|err| format!("{err}"))?; - // sadly, some parts of our current rpc implemention block the jsonrpc's // _socket-listening_ event loop for too long, due to (blocking) long IO or intesive CPU, // causing no further processing of incoming requests and ultimatily innocent clients timing-out. @@ -480,7 +477,7 @@ impl JsonRpcService { let leader_info = poh_recorder.map(|recorder| ClusterTpuInfo::new(cluster_info.clone(), recorder)); let _send_transaction_service = Arc::new(SendTransactionService::new_with_config( - tpu_address, + cluster_info, &bank_forks, leader_info, receiver, diff --git a/runtime-plugin/Cargo.toml b/runtime-plugin/Cargo.toml new file mode 100644 index 0000000000..a2035fc8d8 --- /dev/null +++ b/runtime-plugin/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "solana-runtime-plugin" +publish = false +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +crossbeam-channel = { workspace = true } +json5 = { workspace = true } +jsonrpc-core = { workspace = true } +jsonrpc-core-client = { workspace = true, features = ["ipc"] } +jsonrpc-derive = { workspace = true } +jsonrpc-ipc-server = { workspace = true } +jsonrpc-server-utils = { workspace = true } +libloading = { workspace = true } +log = { workspace = true } +solana-runtime = { workspace = true } +solana-sdk = { workspace = true } +thiserror = { workspace = true } diff --git a/runtime-plugin/src/lib.rs b/runtime-plugin/src/lib.rs new file mode 100644 index 0000000000..477af43c9b --- /dev/null +++ b/runtime-plugin/src/lib.rs @@ -0,0 +1,4 @@ +pub mod runtime_plugin; +pub mod runtime_plugin_admin_rpc_service; +pub mod runtime_plugin_manager; +pub mod runtime_plugin_service; diff --git a/runtime-plugin/src/runtime_plugin.rs b/runtime-plugin/src/runtime_plugin.rs new file mode 100644 index 0000000000..7dc0b95fa4 --- /dev/null +++ b/runtime-plugin/src/runtime_plugin.rs @@ -0,0 +1,41 @@ +use { + solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache}, + std::{ + any::Any, + error, + 
fmt::Debug, + io, + sync::{atomic::AtomicBool, Arc, RwLock}, + }, + thiserror::Error, +}; + +pub type Result<T> = std::result::Result<T, RuntimePluginError>; + +/// Errors returned by plugin calls +#[derive(Error, Debug)] +pub enum RuntimePluginError { + /// Error opening the configuration file; for example, when the file + /// is not found or when the validator process has no permission to read it. + #[error("Error opening config file. Error detail: ({0}).")] + ConfigFileOpenError(#[from] io::Error), + + /// Any custom error defined by the plugin. + #[error("Plugin-defined custom error. Error message: ({0})")] + Custom(Box<dyn error::Error + Send + Sync>), + + #[error("Failed to load a runtime plugin")] + FailedToLoadPlugin(#[from] Box<dyn error::Error>), +} + +pub struct PluginDependencies { + pub bank_forks: Arc<RwLock<BankForks>>, + pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, + pub exit: Arc<AtomicBool>, +} + +pub trait RuntimePlugin: Any + Debug + Send + Sync { + fn name(&self) -> &'static str; + fn on_load(&mut self, config_file: &str, dependencies: PluginDependencies) -> Result<()>; + fn on_unload(&mut self); +} diff --git a/runtime-plugin/src/runtime_plugin_admin_rpc_service.rs b/runtime-plugin/src/runtime_plugin_admin_rpc_service.rs new file mode 100644 index 0000000000..fdc33b06c5 --- /dev/null +++ b/runtime-plugin/src/runtime_plugin_admin_rpc_service.rs @@ -0,0 +1,326 @@ +//! RPC interface to dynamically make changes to runtime plugins. + +use { + crossbeam_channel::Sender, + jsonrpc_core::{BoxFuture, ErrorCode, MetaIoHandler, Metadata, Result as JsonRpcResult}, + jsonrpc_core_client::{transports::ipc, RpcError}, + jsonrpc_derive::rpc, + jsonrpc_ipc_server::{ + tokio::{self, sync::oneshot::channel as oneshot_channel}, + RequestContext, ServerBuilder, + }, + jsonrpc_server_utils::tokio::sync::oneshot::Sender as OneShotSender, + log::*, + solana_sdk::exit::Exit, + std::{ + path::{Path, PathBuf}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + }, +}; + +#[derive(Debug)] +pub enum RuntimePluginManagerRpcRequest { + ReloadPlugin { + name: String, + config_file: String, + response_sender: OneShotSender<JsonRpcResult<()>>, + }, + UnloadPlugin { + name: String, + response_sender: OneShotSender<JsonRpcResult<()>>, + }, + LoadPlugin { + config_file: String, + response_sender: OneShotSender<JsonRpcResult<String>>, + }, + ListPlugins { + response_sender: OneShotSender<JsonRpcResult<Vec<String>>>, + }, +} + +#[rpc] +pub trait RuntimePluginAdminRpc { + type Metadata; + + #[rpc(meta, name = "reloadPlugin")] + fn reload_plugin( + &self, + meta: Self::Metadata, + name: String, + config_file: String, + ) -> BoxFuture<JsonRpcResult<()>>; + + #[rpc(meta, name = "unloadPlugin")] + fn unload_plugin(&self, meta: Self::Metadata, name: String) -> BoxFuture<JsonRpcResult<()>>; + + #[rpc(meta, name = "loadPlugin")] + fn load_plugin( + &self, + meta: Self::Metadata, + config_file: String, + ) -> BoxFuture<JsonRpcResult<String>>; + + #[rpc(meta, name = "listPlugins")] + fn list_plugins(&self, meta: Self::Metadata) -> BoxFuture<JsonRpcResult<Vec<String>>>; +} + +#[derive(Clone)] +pub struct RuntimePluginAdminRpcRequestMetadata { + pub rpc_request_sender: Sender<RuntimePluginManagerRpcRequest>, + pub validator_exit: Arc<RwLock<Exit>>, +} + +impl Metadata for RuntimePluginAdminRpcRequestMetadata {} + +fn rpc_path(ledger_path: &Path) -> PathBuf { + #[cfg(target_family = "windows")] + { + // More information about the wackiness of pipe names over at + // https://docs.microsoft.com/en-us/windows/win32/ipc/pipe-names + if let Some(ledger_filename) = ledger_path.file_name() { + PathBuf::from(format!( + "\\\\.\\pipe\\{}-runtime_plugin_admin.rpc", + ledger_filename.to_string_lossy() + )) + } else { + PathBuf::from("\\\\.\\pipe\\runtime_plugin_admin.rpc") + } + } + #[cfg(not(target_family = "windows"))] + {
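A hypothetical admin-client sketch against this interface, using the `connect` helper defined at the bottom of this file and the jsonrpc_derive-generated client; the plugin name and config path are illustrative:

    use std::path::Path;

    async fn reload_plugin_example(ledger_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
        // Dials the IPC socket (or named pipe on Windows) opened by `run`.
        let client = connect(ledger_path).await?;
        client
            .reload_plugin(
                "my_plugin".to_string(),
                "/etc/solana/my_plugin_config.json".to_string(),
            )
            .await?;
        Ok(())
    }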
ledger_path.join("runtime_plugin_admin.rpc") + } +} + +/// Start the Runtime Plugin Admin RPC interface. +pub fn run( + ledger_path: &Path, + metadata: RuntimePluginAdminRpcRequestMetadata, + plugin_exit: Arc, +) { + let rpc_path = rpc_path(ledger_path); + + let event_loop = tokio::runtime::Builder::new_multi_thread() + .thread_name("solRuntimePluginAdminRpc") + .worker_threads(1) + .enable_all() + .build() + .unwrap(); + + std::thread::Builder::new() + .name("solAdminRpc".to_string()) + .spawn(move || { + let mut io = MetaIoHandler::default(); + io.extend_with(RuntimePluginAdminRpcImpl.to_delegate()); + + let validator_exit = metadata.validator_exit.clone(); + + match ServerBuilder::with_meta_extractor(io, move |_req: &RequestContext| { + metadata.clone() + }) + .event_loop_executor(event_loop.handle().clone()) + .start(&format!("{}", rpc_path.display())) + { + Err(e) => { + error!("Unable to start runtime plugin admin rpc service: {e:?}, exiting"); + validator_exit.write().unwrap().exit(); + } + Ok(server) => { + info!("started runtime plugin admin rpc service!"); + let close_handle = server.close_handle(); + let c_plugin_exit = plugin_exit.clone(); + validator_exit + .write() + .unwrap() + .register_exit(Box::new(move || { + close_handle.close(); + c_plugin_exit.store(true, Ordering::Relaxed); + })); + + server.wait(); + plugin_exit.store(true, Ordering::Relaxed); + } + } + }) + .unwrap(); +} + +pub struct RuntimePluginAdminRpcImpl; +impl RuntimePluginAdminRpc for RuntimePluginAdminRpcImpl { + type Metadata = RuntimePluginAdminRpcRequestMetadata; + + fn reload_plugin( + &self, + meta: Self::Metadata, + name: String, + config_file: String, + ) -> BoxFuture> { + Box::pin(async move { + let (response_sender, response_receiver) = oneshot_channel(); + + if meta + .rpc_request_sender + .send(RuntimePluginManagerRpcRequest::ReloadPlugin { + name, + config_file, + response_sender, + }) + .is_err() + { + error!("rpc_request_sender channel closed, exiting"); + meta.validator_exit.write().unwrap().exit(); + + return Err(jsonrpc_core::Error { + code: ErrorCode::InternalError, + message: "Internal channel disconnected while sending the request".to_string(), + data: None, + }); + } + + match response_receiver.await { + Err(_) => { + error!("response_receiver channel closed, exiting"); + meta.validator_exit.write().unwrap().exit(); + Err(jsonrpc_core::Error { + code: ErrorCode::InternalError, + message: "Internal channel disconnected while awaiting the response" + .to_string(), + data: None, + }) + } + Ok(resp) => resp, + } + }) + } + + fn unload_plugin(&self, meta: Self::Metadata, name: String) -> BoxFuture> { + Box::pin(async move { + let (response_sender, response_receiver) = oneshot_channel(); + + if meta + .rpc_request_sender + .send(RuntimePluginManagerRpcRequest::UnloadPlugin { + name, + response_sender, + }) + .is_err() + { + error!("rpc_request_sender channel closed, exiting"); + meta.validator_exit.write().unwrap().exit(); + + return Err(jsonrpc_core::Error { + code: ErrorCode::InternalError, + message: "Internal channel disconnected while sending the request".to_string(), + data: None, + }); + } + + match response_receiver.await { + Err(_) => { + error!("response_receiver channel closed, exiting"); + meta.validator_exit.write().unwrap().exit(); + Err(jsonrpc_core::Error { + code: ErrorCode::InternalError, + message: "Internal channel disconnected while awaiting the response" + .to_string(), + data: None, + }) + } + Ok(resp) => resp, + } + }) + } + + fn load_plugin( + &self, + meta: 
Self::Metadata, + config_file: String, + ) -> BoxFuture> { + Box::pin(async move { + let (response_sender, response_receiver) = oneshot_channel(); + + if meta + .rpc_request_sender + .send(RuntimePluginManagerRpcRequest::LoadPlugin { + config_file, + response_sender, + }) + .is_err() + { + error!("rpc_request_sender channel closed, exiting"); + meta.validator_exit.write().unwrap().exit(); + + return Err(jsonrpc_core::Error { + code: ErrorCode::InternalError, + message: "Internal channel disconnected while sending the request".to_string(), + data: None, + }); + } + + match response_receiver.await { + Err(_) => { + error!("response_receiver channel closed, exiting"); + meta.validator_exit.write().unwrap().exit(); + Err(jsonrpc_core::Error { + code: ErrorCode::InternalError, + message: "Internal channel disconnected while awaiting the response" + .to_string(), + data: None, + }) + } + Ok(resp) => resp, + } + }) + } + + fn list_plugins(&self, meta: Self::Metadata) -> BoxFuture>> { + Box::pin(async move { + let (response_sender, response_receiver) = oneshot_channel(); + + if meta + .rpc_request_sender + .send(RuntimePluginManagerRpcRequest::ListPlugins { response_sender }) + .is_err() + { + error!("rpc_request_sender channel closed, exiting"); + meta.validator_exit.write().unwrap().exit(); + + return Err(jsonrpc_core::Error { + code: ErrorCode::InternalError, + message: "Internal channel disconnected while sending the request".to_string(), + data: None, + }); + } + + match response_receiver.await { + Err(_) => { + error!("response_receiver channel closed, exiting"); + meta.validator_exit.write().unwrap().exit(); + Err(jsonrpc_core::Error { + code: ErrorCode::InternalError, + message: "Internal channel disconnected while awaiting the response" + .to_string(), + data: None, + }) + } + Ok(resp) => resp, + } + }) + } +} + +// Connect to the Runtime Plugin RPC interface +pub async fn connect(ledger_path: &Path) -> Result { + let rpc_path = rpc_path(ledger_path); + if !rpc_path.exists() { + Err(RpcError::Client(format!( + "{} does not exist", + rpc_path.display() + ))) + } else { + ipc::connect::<_, gen_client::Client>(&format!("{}", rpc_path.display())).await + } +} diff --git a/runtime-plugin/src/runtime_plugin_manager.rs b/runtime-plugin/src/runtime_plugin_manager.rs new file mode 100644 index 0000000000..af1dcf2cde --- /dev/null +++ b/runtime-plugin/src/runtime_plugin_manager.rs @@ -0,0 +1,275 @@ +use { + crate::runtime_plugin::{PluginDependencies, RuntimePlugin}, + jsonrpc_core::{serde_json, ErrorCode, Result as JsonRpcResult}, + libloading::Library, + log::*, + solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache}, + std::{ + fs::File, + io::Read, + path::{Path, PathBuf}, + sync::{atomic::AtomicBool, Arc, RwLock}, + }, +}; + +#[derive(thiserror::Error, Debug)] +pub enum RuntimePluginManagerError { + #[error("Cannot open the the plugin config file")] + CannotOpenConfigFile(String), + + #[error("Cannot read the the plugin config file")] + CannotReadConfigFile(String), + + #[error("The config file is not in a valid Json format")] + InvalidConfigFileFormat(String), + + #[error("Plugin library path is not specified in the config file")] + LibPathNotSet, + + #[error("Invalid plugin path")] + InvalidPluginPath, + + #[error("Cannot load plugin shared library")] + PluginLoadError(String), + + #[error("The runtime plugin {0} is already loaded shared library")] + PluginAlreadyLoaded(String), + + #[error("The RuntimePlugin on_load method failed")] + PluginStartError(String), +} + +pub 
struct RuntimePluginManager { + plugins: Vec<Box<dyn RuntimePlugin>>, + libs: Vec<Library>, + bank_forks: Arc<RwLock<BankForks>>, + block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, + exit: Arc<AtomicBool>, +} + +impl RuntimePluginManager { + pub fn new( + bank_forks: Arc<RwLock<BankForks>>, + block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, + exit: Arc<AtomicBool>, + ) -> Self { + Self { + plugins: vec![], + libs: vec![], + bank_forks, + block_commitment_cache, + exit, + } + } + + /// This method allows dynamic loading of a runtime plugin. + /// Adds to the existing list of loaded plugins. + pub(crate) fn load_plugin( + &mut self, + plugin_config_path: impl AsRef<Path>, + ) -> JsonRpcResult<String> { + // First load plugin + let (mut new_plugin, new_lib, config_file) = + load_plugin_from_config(plugin_config_path.as_ref()).map_err(|e| { + jsonrpc_core::Error { + code: ErrorCode::InvalidRequest, + message: format!("Failed to load plugin: {e}"), + data: None, + } + })?; + + // Then see if a plugin with this name already exists; if so, return Err. + let name = new_plugin.name(); + if self.plugins.iter().any(|plugin| name.eq(plugin.name())) { + return Err(jsonrpc_core::Error { + code: ErrorCode::InvalidRequest, + message: format!( + "There already exists a plugin named {} loaded. Did not load requested plugin", + name, + ), + data: None, + }); + } + + new_plugin + .on_load( + config_file, + PluginDependencies { + bank_forks: self.bank_forks.clone(), + block_commitment_cache: self.block_commitment_cache.clone(), + exit: self.exit.clone(), + }, + ) + .map_err(|on_load_err| jsonrpc_core::Error { + code: ErrorCode::InvalidRequest, + message: format!( + "on_load method of plugin {} failed: {on_load_err}", + new_plugin.name() + ), + data: None, + })?; + + self.plugins.push(new_plugin); + self.libs.push(new_lib); + + Ok(name.to_string()) + } + + /// Unloads the plugins and loaded plugin libraries, making sure to fire + /// their `on_plugin_unload()` methods so they can do any necessary cleanup. + pub(crate) fn unload_all_plugins(&mut self) { + (0..self.plugins.len()).for_each(|idx| { + self.try_drop_plugin(idx); + }); + } + + pub(crate) fn unload_plugin(&mut self, name: &str) -> JsonRpcResult<()> { + // Check if any plugin names match this one + let Some(idx) = self + .plugins + .iter() + .position(|plugin| plugin.name().eq(name)) + else { + // If we don't find one, return an error + return Err(jsonrpc_core::error::Error { + code: ErrorCode::InvalidRequest, + message: String::from("The plugin you requested to unload is not loaded"), + data: None, + }); + }; + + // Unload and drop plugin and lib + self.try_drop_plugin(idx); + + Ok(()) + } + + /// Reloads an existing plugin. + pub(crate) fn reload_plugin(&mut self, name: &str, config_file: &str) -> JsonRpcResult<()> { + // Check if any plugin names match this one + let Some(idx) = self + .plugins + .iter() + .position(|plugin| plugin.name().eq(name)) + else { + // If we don't find one, return an error + return Err(jsonrpc_core::error::Error { + code: ErrorCode::InvalidRequest, + message: String::from("The plugin you requested to reload is not loaded"), + data: None, + }); + }; + + self.try_drop_plugin(idx); + + // Try to load plugin, library + // SAFETY: It is up to the validator to ensure this is a valid plugin library.
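For reference, a hypothetical plugin config file consumed by these load/reload paths; only `libpath` is read by `load_plugin_from_config` below, and relative paths resolve against the config file's directory:

    // json5, so comments and unquoted keys are allowed.
    const EXAMPLE_PLUGIN_CONFIG: &str = r#"{
        // Shared library (cdylib) that exports _create_plugin.
        libpath: "libmy_runtime_plugin.so",
    }"#;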
+ let (mut new_plugin, new_lib, new_parsed_config_file) = + load_plugin_from_config(config_file.as_ref()).map_err(|err| jsonrpc_core::Error { + code: ErrorCode::InvalidRequest, + message: err.to_string(), + data: None, + })?; + + // Attempt to on_load with new plugin + match new_plugin.on_load( + new_parsed_config_file, + PluginDependencies { + bank_forks: self.bank_forks.clone(), + block_commitment_cache: self.block_commitment_cache.clone(), + exit: self.exit.clone(), + }, + ) { + // On success, push plugin and library + Ok(()) => { + self.plugins.push(new_plugin); + self.libs.push(new_lib); + Ok(()) + } + // On failure, return error + Err(err) => Err(jsonrpc_core::error::Error { + code: ErrorCode::InvalidRequest, + message: format!( + "Failed to start new plugin (previous plugin was dropped!): {err}" + ), + data: None, + }), + } + } + + pub(crate) fn list_plugins(&self) -> JsonRpcResult> { + Ok(self.plugins.iter().map(|p| p.name().to_owned()).collect()) + } + + fn try_drop_plugin(&mut self, idx: usize) { + if idx < self.plugins.len() { + let mut plugin = self.plugins.remove(idx); + let lib = self.libs.remove(idx); + drop(lib); + plugin.on_unload(); + } else { + error!("failed to drop plugin: index {idx} out of bounds"); + } + } +} + +fn load_plugin_from_config( + plugin_config_path: &Path, +) -> Result<(Box, Library, &str), RuntimePluginManagerError> { + type PluginConstructor = unsafe fn() -> *mut dyn RuntimePlugin; + use libloading::Symbol; + + let mut file = match File::open(plugin_config_path) { + Ok(file) => file, + Err(err) => { + return Err(RuntimePluginManagerError::CannotOpenConfigFile(format!( + "Failed to open the plugin config file {plugin_config_path:?}, error: {err:?}" + ))); + } + }; + + let mut contents = String::new(); + if let Err(err) = file.read_to_string(&mut contents) { + return Err(RuntimePluginManagerError::CannotReadConfigFile(format!( + "Failed to read the plugin config file {plugin_config_path:?}, error: {err:?}" + ))); + } + + let result: serde_json::Value = match json5::from_str(&contents) { + Ok(value) => value, + Err(err) => { + return Err(RuntimePluginManagerError::InvalidConfigFileFormat(format!( + "The config file {plugin_config_path:?} is not in a valid Json5 format, error: {err:?}" + ))); + } + }; + + let libpath = result["libpath"] + .as_str() + .ok_or(RuntimePluginManagerError::LibPathNotSet)?; + let mut libpath = PathBuf::from(libpath); + if libpath.is_relative() { + let config_dir = plugin_config_path.parent().ok_or_else(|| { + RuntimePluginManagerError::CannotOpenConfigFile(format!( + "Failed to resolve parent of {plugin_config_path:?}", + )) + })?; + libpath = config_dir.join(libpath); + } + + let config_file = plugin_config_path + .as_os_str() + .to_str() + .ok_or(RuntimePluginManagerError::InvalidPluginPath)?; + + let (plugin, lib) = unsafe { + let lib = Library::new(libpath) + .map_err(|e| RuntimePluginManagerError::PluginLoadError(e.to_string()))?; + let constructor: Symbol = lib + .get(b"_create_plugin") + .map_err(|e| RuntimePluginManagerError::PluginLoadError(e.to_string()))?; + (Box::from_raw(constructor()), lib) + }; + + Ok((plugin, lib, config_file)) +} diff --git a/runtime-plugin/src/runtime_plugin_service.rs b/runtime-plugin/src/runtime_plugin_service.rs new file mode 100644 index 0000000000..5fcb625a26 --- /dev/null +++ b/runtime-plugin/src/runtime_plugin_service.rs @@ -0,0 +1,123 @@ +use { + crate::{ + runtime_plugin::RuntimePluginError, + runtime_plugin_admin_rpc_service::RuntimePluginManagerRpcRequest, + 
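To satisfy the `_create_plugin` symbol lookup above, a plugin crate is built as a cdylib that exports the constructor. A minimal, hypothetical skeleton (crate name and behavior are placeholders; the extern "C" export mirrors the geyser-plugin convention):

    use solana_runtime_plugin::runtime_plugin::{PluginDependencies, Result, RuntimePlugin};

    #[derive(Debug)]
    pub struct NoopPlugin;

    impl RuntimePlugin for NoopPlugin {
        fn name(&self) -> &'static str {
            "noop_plugin"
        }

        fn on_load(&mut self, _config_file: &str, deps: PluginDependencies) -> Result<()> {
            // Spawn worker threads here; poll deps.exit to shut down cleanly.
            let _ = deps;
            Ok(())
        }

        fn on_unload(&mut self) {}
    }

    /// # Safety
    /// The loader takes ownership of the returned raw pointer.
    #[no_mangle]
    pub unsafe extern "C" fn _create_plugin() -> *mut dyn RuntimePlugin {
        Box::into_raw(Box::new(NoopPlugin))
    }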
runtime_plugin_manager::RuntimePluginManager, + }, + crossbeam_channel::Receiver, + log::{error, info}, + solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache}, + std::{ + path::PathBuf, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + thread::{self, JoinHandle}, + time::Duration, + }, +}; + +pub struct RuntimePluginService { + plugin_manager: Arc>, + rpc_thread: JoinHandle<()>, +} + +impl RuntimePluginService { + pub fn start( + plugin_config_files: &[PathBuf], + rpc_receiver: Receiver, + bank_forks: Arc>, + block_commitment_cache: Arc>, + exit: Arc, + ) -> Result { + let mut plugin_manager = + RuntimePluginManager::new(bank_forks, block_commitment_cache, exit.clone()); + + for config in plugin_config_files { + let name = plugin_manager + .load_plugin(config) + .map_err(|e| RuntimePluginError::FailedToLoadPlugin(e.into()))?; + info!("Loaded Runtime Plugin: {name}"); + } + + let plugin_manager = Arc::new(RwLock::new(plugin_manager)); + let rpc_thread = + Self::start_rpc_request_handler(rpc_receiver, plugin_manager.clone(), exit); + + Ok(Self { + plugin_manager, + rpc_thread, + }) + } + + pub fn join(self) { + if let Err(e) = self.rpc_thread.join() { + error!("error joining rpc thread: {e:?}"); + } + self.plugin_manager.write().unwrap().unload_all_plugins(); + } + + fn start_rpc_request_handler( + rpc_receiver: Receiver, + plugin_manager: Arc>, + exit: Arc, + ) -> JoinHandle<()> { + thread::Builder::new() + .name("solRuntimePluginRpc".to_string()) + .spawn(move || { + const TIMEOUT: Duration = Duration::from_secs(3); + while !exit.load(Ordering::Relaxed) { + if let Ok(request) = rpc_receiver.recv_timeout(TIMEOUT) { + match request { + RuntimePluginManagerRpcRequest::ListPlugins { response_sender } => { + let plugin_list = plugin_manager.read().unwrap().list_plugins(); + if response_sender.send(plugin_list).is_err() { + error!("response_sender channel disconnected"); + return; + } + } + RuntimePluginManagerRpcRequest::ReloadPlugin { + ref name, + ref config_file, + response_sender, + } => { + let reload_result = plugin_manager + .write() + .unwrap() + .reload_plugin(name, config_file); + if response_sender.send(reload_result).is_err() { + error!("response_sender channel disconnected"); + return; + } + } + RuntimePluginManagerRpcRequest::LoadPlugin { + ref config_file, + response_sender, + } => { + let load_result = + plugin_manager.write().unwrap().load_plugin(config_file); + if response_sender.send(load_result).is_err() { + error!("response_sender channel disconnected"); + return; + } + } + RuntimePluginManagerRpcRequest::UnloadPlugin { + ref name, + response_sender, + } => { + let unload_result = + plugin_manager.write().unwrap().unload_plugin(name); + if response_sender.send(unload_result).is_err() { + error!("response_sender channel disconnected"); + return; + } + } + } + } + } + plugin_manager.write().unwrap().unload_all_plugins(); + }) + .unwrap() + } +} diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4f2260f72e..3b374c663e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -69,8 +69,8 @@ use { solana_accounts_db::{ account_overrides::AccountOverrides, accounts::{ - AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, RewardInterval, - TransactionLoadResult, + AccountAddressFilter, AccountLocks, Accounts, LoadedTransaction, PubkeyAccountSlot, + RewardInterval, TransactionLoadResult, }, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, @@ -312,6 +312,7 @@ enum 
ProgramAccountLoadResult { ProgramOfLoaderV4(AccountSharedData, Slot), } +#[derive(Debug)] pub struct LoadAndExecuteTransactionsOutput { pub loaded_transactions: Vec<TransactionLoadResult>, // Vector of results indicating whether a transaction was executed or could not @@ -329,6 +330,22 @@ pub struct LoadAndExecuteTransactionsOutput { pub error_counters: TransactionErrorMetrics, } +#[derive(Clone, Debug, PartialEq)] +pub struct AccountData { + pub pubkey: Pubkey, + pub data: AccountSharedData, +} + +#[derive(Clone)] +pub struct BundleTransactionSimulationResult { + pub result: Result<()>, + pub logs: TransactionLogMessages, + pub pre_execution_accounts: Option<Vec<AccountData>>, + pub post_execution_accounts: Option<Vec<AccountData>>, + pub return_data: Option<TransactionReturnData>, + pub units_consumed: u64, +} + pub struct TransactionSimulationResult { pub result: Result<()>, pub logs: TransactionLogMessages, @@ -760,7 +777,7 @@ pub struct Bank { inflation: Arc<RwLock<Inflation>>, /// cache of vote_account and stake_account state for this fork - stakes_cache: StakesCache, + pub stakes_cache: StakesCache, /// staked nodes on epoch boundaries, saved off when a bank.slot() is at /// a leader schedule calculation boundary @@ -4311,17 +4328,61 @@ impl Bank { &'a self, transactions: &'b [SanitizedTransaction], transaction_results: impl Iterator<Item = Result<()>>, + additional_read_locks: &HashSet<Pubkey>, + additional_write_locks: &HashSet<Pubkey>, ) -> TransactionBatch<'a, 'b> { - // this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_results = self.rc.accounts.lock_accounts_with_results( transactions.iter(), transaction_results, tx_account_lock_limit, + additional_read_locks, + additional_write_locks, ); TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions)) } + /// Prepare a locked transaction batch from a list of sanitized transactions, and their cost + /// limited packing status, where transactions will be locked sequentially until the first failure + pub fn prepare_sequential_sanitized_batch_with_results<'a, 'b>( + &'a self, + transactions: &'b [SanitizedTransaction], + ) -> TransactionBatch<'a, 'b> { + // this lock_results could be: Ok, AccountInUse, AccountLoadedTwice, or TooManyAccountLocks + let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + let lock_results = self + .rc + .accounts + .lock_accounts_sequential_with_results(transactions.iter(), tx_account_lock_limit); + TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions)) + } + + /// Prepare a locked transaction batch from a list of sanitized transactions for simulation. + /// This grabs as many sequential account locks as it can without a RW conflict.
However, + /// it uses a temporary version of AccountLocks and not the Bank's account locks, so one can + /// use this during simulation on an unfrozen Bank without worrying about impacting the RW + /// lock usage in replay + pub fn prepare_sequential_sanitized_batch_with_results_for_simulation<'a, 'b>( + &'a self, + transactions: &'b [SanitizedTransaction], + ) -> TransactionBatch<'a, 'b> { + let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + let tx_account_locks_results: Vec> = transactions + .iter() + .map(|tx| tx.get_account_locks(tx_account_lock_limit)) + .collect(); + + let mut account_locks = AccountLocks::default(); + let lock_results = + Accounts::lock_accounts_sequential(&mut account_locks, tx_account_locks_results); + let mut batch = TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions)); + // this is required to ensure that accounts aren't unlocked accidentally, which can be problematic during replay. + // more specifically, during process_entries, if the lock counts are accidentally decremented, + // one might end up replaying a block incorrectly + batch.set_needs_unlock(false); + batch + } + /// Prepare a transaction batch from a single transaction without locking accounts pub(crate) fn prepare_unlocked_batch_from_single_tx<'a>( &'a self, @@ -4638,6 +4699,29 @@ impl Bank { } } + pub fn collect_balances_with_cache( + &self, + batch: &TransactionBatch, + account_overrides: Option<&AccountOverrides>, + ) -> TransactionBalances { + let mut balances: TransactionBalances = vec![]; + for transaction in batch.sanitized_transactions() { + let mut transaction_balances: Vec = vec![]; + for account_key in transaction.message().account_keys().iter() { + let balance = match account_overrides { + None => self.get_balance(account_key), + Some(overrides) => match overrides.get(account_key) { + None => self.get_balance(account_key), + Some(account_data) => account_data.lamports(), + }, + }; + transaction_balances.push(balance); + } + balances.push(transaction_balances); + } + balances + } + fn load_program_accounts(&self, pubkey: &Pubkey) -> ProgramAccountLoadResult { let program_account = match self.get_account_with_fixed_root(pubkey) { None => return ProgramAccountLoadResult::AccountNotFound, @@ -5733,6 +5817,26 @@ impl Bank { } } + pub fn collect_accounts_to_store<'a>( + &self, + txs: &'a [SanitizedTransaction], + res: &'a [TransactionExecutionResult], + loaded: &'a mut [TransactionLoadResult], + ) -> Vec<(&'a Pubkey, &'a AccountSharedData)> { + let (last_blockhash, lamports_per_signature) = + self.last_blockhash_and_lamports_per_signature(); + let durable_nonce = DurableNonce::from_blockhash(&last_blockhash); + Accounts::collect_accounts_to_store( + txs, + res, + loaded, + &self.rent_collector, + &durable_nonce, + lamports_per_signature, + ) + .0 + } + fn collect_rent( &self, execution_results: &[TransactionExecutionResult], diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 9f237def74..3d5751deba 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -223,12 +223,13 @@ pub fn bank_fields_from_snapshot_archives( incremental_snapshot_archives_dir: impl AsRef, ) -> snapshot_utils::Result { let full_snapshot_archive_info = - get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir) + get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir, None) .ok_or(SnapshotError::NoSnapshotArchives)?; let incremental_snapshot_archive_info = 
get_highest_incremental_snapshot_archive_info( &incremental_snapshot_archives_dir, full_snapshot_archive_info.slot(), + None, ); let temp_unpack_dir = TempDir::new()?; @@ -440,12 +441,13 @@ pub fn bank_from_latest_snapshot_archives( Option, )> { let full_snapshot_archive_info = - get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir) + get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir, None) .ok_or(SnapshotError::NoSnapshotArchives)?; let incremental_snapshot_archive_info = get_highest_incremental_snapshot_archive_info( &incremental_snapshot_archives_dir, full_snapshot_archive_info.slot(), + None, ); let (bank, _) = bank_from_snapshot_archives( diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 0cf1aab09d..098c38966a 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1732,8 +1732,9 @@ pub fn get_incremental_snapshot_archives( /// Get the highest slot of the full snapshot archives in a directory pub fn get_highest_full_snapshot_archive_slot( full_snapshot_archives_dir: impl AsRef, + halt_at_slot: Option, ) -> Option { - get_highest_full_snapshot_archive_info(full_snapshot_archives_dir) + get_highest_full_snapshot_archive_info(full_snapshot_archives_dir, halt_at_slot) .map(|full_snapshot_archive_info| full_snapshot_archive_info.slot()) } @@ -1742,10 +1743,12 @@ pub fn get_highest_full_snapshot_archive_slot( pub fn get_highest_incremental_snapshot_archive_slot( incremental_snapshot_archives_dir: impl AsRef, full_snapshot_slot: Slot, + halt_at_slot: Option, ) -> Option { get_highest_incremental_snapshot_archive_info( incremental_snapshot_archives_dir, full_snapshot_slot, + halt_at_slot, ) .map(|incremental_snapshot_archive_info| incremental_snapshot_archive_info.slot()) } @@ -1753,8 +1756,13 @@ pub fn get_highest_incremental_snapshot_archive_slot( /// Get the path (and metadata) for the full snapshot archive with the highest slot in a directory pub fn get_highest_full_snapshot_archive_info( full_snapshot_archives_dir: impl AsRef, + halt_at_slot: Option, ) -> Option { let mut full_snapshot_archives = get_full_snapshot_archives(full_snapshot_archives_dir); + if let Some(halt_at_slot) = halt_at_slot { + full_snapshot_archives + .retain(|archive| archive.snapshot_archive_info().slot <= halt_at_slot); + } full_snapshot_archives.sort_unstable(); full_snapshot_archives.into_iter().next_back() } @@ -1764,6 +1772,7 @@ pub fn get_highest_full_snapshot_archive_info( pub fn get_highest_incremental_snapshot_archive_info( incremental_snapshot_archives_dir: impl AsRef, full_snapshot_slot: Slot, + halt_at_slot: Option, ) -> Option { // Since we want to filter down to only the incremental snapshot archives that have the same // full snapshot slot as the value passed in, perform the filtering before sorting to avoid @@ -1775,6 +1784,9 @@ pub fn get_highest_incremental_snapshot_archive_info( incremental_snapshot_archive_info.base_slot() == full_snapshot_slot }) .collect::>(); + if let Some(halt_at_slot) = halt_at_slot { + incremental_snapshot_archives.retain(|archive| archive.slot() <= halt_at_slot); + } incremental_snapshot_archives.sort_unstable(); incremental_snapshot_archives.into_iter().next_back() } @@ -2756,7 +2768,7 @@ mod tests { ); assert_eq!( - get_highest_full_snapshot_archive_slot(full_snapshot_archives_dir.path()), + get_highest_full_snapshot_archive_slot(full_snapshot_archives_dir.path(), None), Some(max_slot - 1) ); } @@ -2782,7 +2794,8 @@ mod tests { assert_eq!( 
get_highest_incremental_snapshot_archive_slot( incremental_snapshot_archives_dir.path(), - full_snapshot_slot + full_snapshot_slot, + None, ), Some(max_incremental_snapshot_slot - 1) ); @@ -2791,7 +2804,8 @@ assert_eq!( get_highest_incremental_snapshot_archive_slot( incremental_snapshot_archives_dir.path(), - max_full_snapshot_slot + max_full_snapshot_slot, + None, ), None ); diff --git a/runtime/src/stake_account.rs b/runtime/src/stake_account.rs index 7ee3c96c44..2dacfd6b6b 100644 --- a/runtime/src/stake_account.rs +++ b/runtime/src/stake_account.rs @@ -41,14 +41,14 @@ impl<T> StakeAccount<T> { } #[inline] - pub(crate) fn stake_state(&self) -> &StakeStateV2 { + pub fn stake_state(&self) -> &StakeStateV2 { &self.stake_state } } impl StakeAccount<Delegation> { #[inline] - pub(crate) fn delegation(&self) -> Delegation { + pub fn delegation(&self) -> Delegation { // Safe to unwrap here because StakeAccount will always // only wrap a stake-state which is a delegation. self.stake_state.delegation().unwrap() diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 977c25b180..662dd5d614 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -48,17 +48,17 @@ pub enum InvalidCacheEntryReason { WrongOwner, } -type StakeAccount = stake_account::StakeAccount<Delegation>; +pub type StakeAccount = stake_account::StakeAccount<Delegation>; #[derive(Default, Debug, AbiExample)] -pub(crate) struct StakesCache(RwLock<Stakes<StakeAccount>>); +pub struct StakesCache(RwLock<Stakes<StakeAccount>>); impl StakesCache { pub(crate) fn new(stakes: Stakes<StakeAccount>) -> Self { Self(RwLock::new(stakes)) } - pub(crate) fn stakes(&self) -> RwLockReadGuard<Stakes<StakeAccount>> { + pub fn stakes(&self) -> RwLockReadGuard<Stakes<StakeAccount>> { self.0.read().unwrap() } @@ -185,7 +185,7 @@ pub struct Stakes<T: Clone> { vote_accounts: VoteAccounts, /// stake_delegations - stake_delegations: ImHashMap<Pubkey, T>, + pub stake_delegations: ImHashMap<Pubkey, T>, /// unused unused: u64, @@ -225,7 +225,7 @@ impl Stakes<StakeAccount> { /// full account state for respective stake pubkeys. get_account function /// should return the account at the respective slot where stakes were /// cached. - pub(crate) fn new<F>(stakes: &Stakes<Delegation>, get_account: F) -> Result<Self, Error> + pub fn new<F>(stakes: &Stakes<Delegation>, get_account: F) -> Result<Self, Error> where F: Fn(&Pubkey) -> Option<AccountSharedData>, { @@ -458,7 +458,7 @@ impl<T: Clone> Stakes<T> { ); } - pub(crate) fn stake_delegations(&self) -> &ImHashMap<Pubkey, T> { + pub fn stake_delegations(&self) -> &ImHashMap<Pubkey, T> { &self.stake_delegations } diff --git a/runtime/src/transaction_batch.rs b/runtime/src/transaction_batch.rs index 66711fd5a1..f74158c731 100644 --- a/runtime/src/transaction_batch.rs +++ b/runtime/src/transaction_batch.rs @@ -1,6 +1,6 @@ use { crate::bank::Bank, - solana_sdk::transaction::{Result, SanitizedTransaction}, + solana_sdk::transaction::{Result, SanitizedTransaction, TransactionError}, std::borrow::Cow, }; @@ -46,6 +46,28 @@ impl<'a, 'b> TransactionBatch<'a, 'b> { pub fn needs_unlock(&self) -> bool { self.needs_unlock } + + /// Bundle locking failed if any lock result is something other than Ok or AccountInUse + pub fn check_bundle_lock_results(&self) -> Option<(&SanitizedTransaction, &TransactionError)> { + self.sanitized_transactions() + .iter() + .zip(self.lock_results.iter()) + .find(|(_, lock_result)| { + !matches!(lock_result, Ok(()) | Err(TransactionError::AccountInUse)) + }) + .map(|(transaction, lock_result)| { + ( + transaction, + match lock_result { + Ok(_) => { + // safe here because the find above will never return Ok + unreachable!() + } + Err(lock_error) => lock_error, + }, + ) + }) + } } // Unlock all locked accounts in destructor.
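A sketch of how a caller might combine the simulation batch helper added to bank.rs with `check_bundle_lock_results` above; this is an assumed call site, not part of this patch:

    use solana_runtime::bank::Bank;
    use solana_sdk::transaction::SanitizedTransaction;

    fn validate_bundle_locks(bank: &Bank, txs: &[SanitizedTransaction]) -> Result<(), String> {
        // Locks sequentially against a temporary AccountLocks, so replay's
        // lock accounting is untouched; any error other than AccountInUse
        // means the bundle is malformed.
        let batch = bank.prepare_sequential_sanitized_batch_with_results_for_simulation(txs);
        if let Some((tx, err)) = batch.check_bundle_lock_results() {
            return Err(format!("bundle lock failure on {}: {err:?}", tx.signature()));
        }
        Ok(())
    }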
diff --git a/rustfmt.toml b/rustfmt.toml index e26d07f0d8..c7ccd48750 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,2 +1,7 @@ imports_granularity = "One" group_imports = "One" + +ignore = [ + "jito-programs", + "anchor" +] \ No newline at end of file diff --git a/s b/s new file mode 100755 index 0000000000..308133d227 --- /dev/null +++ b/s @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" + +if [ -f .env ]; then + export $(cat .env | grep -v '#' | awk '/=/ {print $1}') +else + echo "Missing .env file" + exit 0 +fi + +echo "Syncing to host: $HOST" + +# sync to build server, ignoring local builds and local/remote dev ledger +rsync -avh --delete --exclude target --exclude docker-output "$SCRIPT_DIR" "$HOST":~/ diff --git a/scripts/increment-cargo-version.sh b/scripts/increment-cargo-version.sh index 1cadfc4bdd..c383f244dd 100755 --- a/scripts/increment-cargo-version.sh +++ b/scripts/increment-cargo-version.sh @@ -23,6 +23,8 @@ ignores=( .cargo target node_modules + jito-programs + anchor ) not_paths=() diff --git a/scripts/run.sh b/scripts/run.sh index 699bfce3e2..3eedad3585 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -102,6 +102,10 @@ args=( --identity "$validator_identity" --vote-account "$validator_vote_account" --ledger "$ledgerDir" + --tip-payment-program-pubkey "T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt" + --tip-distribution-program-pubkey "4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7" + --merkle-root-upload-authority "$validator_identity" + --commission-bps 0 --gossip-port 8001 --full-rpc-api --rpc-port 8899 diff --git a/scripts/solana-install-deploy.sh b/scripts/solana-install-deploy.sh index ea77ca34bc..0f141a32cb 100755 --- a/scripts/solana-install-deploy.sh +++ b/scripts/solana-install-deploy.sh @@ -57,10 +57,10 @@ esac case $TAG in edge|beta) - DOWNLOAD_URL=https://release.solana.com/"$TAG"/solana-release-$TARGET.tar.bz2 + DOWNLOAD_URL=https://release.jito.wtf/"$TAG"/solana-release-$TARGET.tar.bz2 ;; *) - DOWNLOAD_URL=https://github.com/solana-labs/solana/releases/download/"$TAG"/solana-release-$TARGET.tar.bz2 + DOWNLOAD_URL=https://github.com/jito-foundation/jito-solana/releases/download/"$TAG"/solana-release-$TARGET.tar.bz2 ;; esac diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 061b16cb53..50460102ae 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -38,6 +38,7 @@ full = [ dev-context-only-utils = [] [dependencies] +anchor-lang = { workspace = true } assert_matches = { workspace = true, optional = true } base64 = { workspace = true } bincode = { workspace = true } diff --git a/sdk/src/bundle/mod.rs b/sdk/src/bundle/mod.rs new file mode 100644 index 0000000000..3c02a59f9f --- /dev/null +++ b/sdk/src/bundle/mod.rs @@ -0,0 +1,33 @@ +#![cfg(feature = "full")] + +use { + crate::transaction::{SanitizedTransaction, VersionedTransaction}, + digest::Digest, + itertools::Itertools, + sha2::Sha256, +}; + +#[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] +pub struct VersionedBundle { + pub transactions: Vec<VersionedTransaction>, +} + +#[derive(Clone, Debug)] +pub struct SanitizedBundle { + pub transactions: Vec<SanitizedTransaction>, + pub bundle_id: String, +} + +pub fn derive_bundle_id(transactions: &[VersionedTransaction]) -> String { + let mut hasher = Sha256::new(); + hasher.update(transactions.iter().map(|tx| tx.signatures[0]).join(",")); + format!("{:x}", hasher.finalize()) +} + +pub fn derive_bundle_id_from_sanitized_transactions( + transactions: &[SanitizedTransaction], + ) -> String { + let mut hasher = Sha256::new(); +
hasher.update(transactions.iter().map(|tx| tx.signature()).join(",")); + format!("{:x}", hasher.finalize()) +} diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 825c09e2c3..fda85fda5f 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -59,6 +59,7 @@ pub use solana_program::{ pub mod account; pub mod account_utils; +pub mod bundle; pub mod client; pub mod commitment_config; pub mod compute_budget; diff --git a/send-transaction-service/Cargo.toml b/send-transaction-service/Cargo.toml index 71431037f5..247fd4d950 100644 --- a/send-transaction-service/Cargo.toml +++ b/send-transaction-service/Cargo.toml @@ -13,6 +13,7 @@ edition = { workspace = true } crossbeam-channel = { workspace = true } log = { workspace = true } solana-client = { workspace = true } +solana-gossip = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-runtime = { workspace = true } @@ -21,6 +22,7 @@ solana-tpu-client = { workspace = true } [dev-dependencies] solana-logger = { workspace = true } +solana-streamer = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 27aa1bea40..56817ccd1c 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -6,6 +6,7 @@ use { connection_cache::{ConnectionCache, Protocol}, tpu_connection::TpuConnection, }, + solana_gossip::cluster_info::ClusterInfo, solana_measure::measure::Measure, solana_metrics::datapoint_warn, solana_runtime::{bank::Bank, bank_forks::BankForks}, @@ -328,7 +329,7 @@ const SEND_TRANSACTION_METRICS_REPORT_RATE_MS: u64 = 5000; impl SendTransactionService { pub fn new( - tpu_address: SocketAddr, + cluster_info: Arc, bank_forks: &Arc>, leader_info: Option, receiver: Receiver, @@ -343,7 +344,7 @@ impl SendTransactionService { ..Config::default() }; Self::new_with_config( - tpu_address, + cluster_info, bank_forks, leader_info, receiver, @@ -354,7 +355,7 @@ impl SendTransactionService { } pub fn new_with_config( - tpu_address: SocketAddr, + cluster_info: Arc, bank_forks: &Arc>, leader_info: Option, receiver: Receiver, @@ -369,7 +370,7 @@ impl SendTransactionService { let leader_info_provider = Arc::new(Mutex::new(CurrentLeaderInfo::new(leader_info))); let receive_txn_thread = Self::receive_txn_thread( - tpu_address, + cluster_info.clone(), receiver, leader_info_provider.clone(), connection_cache.clone(), @@ -380,7 +381,7 @@ impl SendTransactionService { ); let retry_thread = Self::retry_thread( - tpu_address, + cluster_info, bank_forks.clone(), leader_info_provider, connection_cache.clone(), @@ -398,7 +399,7 @@ impl SendTransactionService { /// Thread responsible for receiving transactions from RPC clients. 
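+    // Note: the TPU address is now resolved from `cluster_info` each time a batch
+    // is sent (see the lookup below), rather than fixed at service creation.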
fn receive_txn_thread( - tpu_address: SocketAddr, + cluster_info: Arc, receiver: Receiver, leader_info_provider: Arc>>, connection_cache: Arc, @@ -459,6 +460,10 @@ impl SendTransactionService { stats .sent_transactions .fetch_add(transactions.len() as u64, Ordering::Relaxed); + let tpu_address = cluster_info + .my_contact_info() + .tpu(connection_cache.protocol()) + .unwrap(); Self::send_transactions_in_batch( &tpu_address, &transactions, @@ -505,7 +510,7 @@ impl SendTransactionService { /// Thread responsible for retrying transactions fn retry_thread( - tpu_address: SocketAddr, + cluster_info: Arc, bank_forks: Arc>, leader_info_provider: Arc>>, connection_cache: Arc, @@ -538,7 +543,10 @@ impl SendTransactionService { let bank_forks = bank_forks.read().unwrap(); (bank_forks.root_bank(), bank_forks.working_bank()) }; - + let tpu_address = cluster_info + .my_contact_info() + .tpu(connection_cache.protocol()) + .unwrap(); let _result = Self::process_transactions( &working_bank, &root_bank, @@ -811,27 +819,40 @@ mod test { super::*, crate::tpu_info::NullTpuInfo, crossbeam_channel::{bounded, unbounded}, + solana_gossip::contact_info::ContactInfo, solana_sdk::{ account::AccountSharedData, genesis_config::create_genesis_config, nonce::{self, state::DurableNonce}, pubkey::Pubkey, - signature::Signer, + signature::{Keypair, Signer}, system_program, system_transaction, + timing::timestamp, }, + solana_streamer::socket::SocketAddrSpace, std::ops::Sub, }; + fn new_test_cluster_info() -> Arc { + let keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp()); + Arc::new(ClusterInfo::new( + contact_info, + keypair, + SocketAddrSpace::Unspecified, + )) + } + #[test] fn service_exit() { - let tpu_address = "127.0.0.1:0".parse().unwrap(); let bank = Bank::default_for_tests(); let bank_forks = BankForks::new_rw_arc(bank); let (sender, receiver) = unbounded(); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); + let cluster_info = new_test_cluster_info(); let send_transaction_service = SendTransactionService::new::( - tpu_address, + cluster_info, &bank_forks, None, receiver, @@ -847,7 +868,7 @@ mod test { #[test] fn validator_exit() { - let tpu_address = "127.0.0.1:0".parse().unwrap(); + let cluster_info = new_test_cluster_info(); let bank = Bank::default_for_tests(); let bank_forks = BankForks::new_rw_arc(bank); let (sender, receiver) = bounded(0); @@ -865,7 +886,7 @@ mod test { let exit = Arc::new(AtomicBool::new(false)); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); let _send_transaction_service = SendTransactionService::new::( - tpu_address, + cluster_info, &bank_forks, None, receiver, diff --git a/start b/start new file mode 100755 index 0000000000..c2f35e272a --- /dev/null +++ b/start @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -eu + +SOLANA_CONFIG_DIR=./config + +mkdir -p $SOLANA_CONFIG_DIR +NDEBUG=1 ./multinode-demo/setup.sh +cargo run --release --bin solana-ledger-tool -- -l config/bootstrap-validator/ create-snapshot 0 +NDEBUG=1 ./multinode-demo/faucet.sh diff --git a/start_multi b/start_multi new file mode 100755 index 0000000000..66de0032dc --- /dev/null +++ b/start_multi @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +set -eu + +SOLANA_KEYGEN="cargo run --release --bin solana-keygen --" +SOLANA_CONFIG_DIR=./config + +if [[ ! -d $SOLANA_CONFIG_DIR ]]; then + echo "New Config! 
Generating Identities" + mkdir $SOLANA_CONFIG_DIR + $SOLANA_KEYGEN new --no-passphrase -so "$SOLANA_CONFIG_DIR"/a/identity.json + $SOLANA_KEYGEN new --no-passphrase -so "$SOLANA_CONFIG_DIR"/a/stake-account.json + $SOLANA_KEYGEN new --no-passphrase -so "$SOLANA_CONFIG_DIR"/a/vote-account.json + + $SOLANA_KEYGEN new --no-passphrase -so "$SOLANA_CONFIG_DIR"/b/identity.json + $SOLANA_KEYGEN new --no-passphrase -so "$SOLANA_CONFIG_DIR"/b/stake-account.json + $SOLANA_KEYGEN new --no-passphrase -so "$SOLANA_CONFIG_DIR"/b/vote-account.json +fi + +NDEBUG=1 ./multinode-demo/setup.sh \ + --bootstrap-validator \ + "$SOLANA_CONFIG_DIR"/a/identity.json \ + "$SOLANA_CONFIG_DIR"/a/vote-account.json \ + "$SOLANA_CONFIG_DIR"/a/stake-account.json \ + --bootstrap-validator \ + "$SOLANA_CONFIG_DIR"/b/identity.json \ + "$SOLANA_CONFIG_DIR"/b/vote-account.json \ + "$SOLANA_CONFIG_DIR"/b/stake-account.json + +cargo run --bin solana-ledger-tool -- -l config/bootstrap-validator/ create-snapshot 0 +NDEBUG=1 ./multinode-demo/faucet.sh diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index e807f80c96..19b02a4016 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -986,6 +986,7 @@ impl TestValidator { DEFAULT_TPU_CONNECTION_POOL_SIZE, config.tpu_enable_udp, config.admin_rpc_service_post_init.clone(), + None, )?); // Needed to avoid panics in `solana-responder-gossip` in tests that create a number of diff --git a/tip-distributor/Cargo.toml b/tip-distributor/Cargo.toml new file mode 100644 index 0000000000..7ab20b3f7a --- /dev/null +++ b/tip-distributor/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "solana-tip-distributor" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +description = "Collection of binaries used to distribute MEV rewards to delegators and validators." 
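+# These binaries run off-chain after an epoch ends; see this directory's README.md
+# for the end-to-end workflow.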
+publish = false
+
+[dependencies]
+anchor-lang = { workspace = true }
+clap = { version = "4.1.11", features = ["derive", "env"] }
+crossbeam-channel = { workspace = true }
+env_logger = { workspace = true }
+futures = { workspace = true }
+gethostname = { workspace = true }
+im = { workspace = true }
+itertools = { workspace = true }
+jito-tip-distribution = { workspace = true }
+jito-tip-payment = { workspace = true }
+log = { workspace = true }
+num-traits = { workspace = true }
+rand = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+solana-accounts-db = { workspace = true }
+solana-client = { workspace = true }
+solana-genesis-utils = { workspace = true }
+solana-ledger = { workspace = true }
+solana-measure = { workspace = true }
+solana-merkle-tree = { workspace = true }
+solana-metrics = { workspace = true }
+solana-program = { workspace = true }
+solana-program-runtime = { workspace = true }
+solana-rpc-client-api = { workspace = true }
+solana-runtime = { workspace = true }
+solana-sdk = { workspace = true }
+solana-stake-program = { workspace = true }
+solana-transaction-status = { workspace = true }
+solana-vote = { workspace = true }
+thiserror = { workspace = true }
+tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
+
+[dev-dependencies]
+solana-sdk = { workspace = true, features = ["dev-context-only-utils"] }
+
+[[bin]]
+name = "solana-stake-meta-generator"
+path = "src/bin/stake-meta-generator.rs"
+
+[[bin]]
+name = "solana-merkle-root-generator"
+path = "src/bin/merkle-root-generator.rs"
+
+[[bin]]
+name = "solana-merkle-root-uploader"
+path = "src/bin/merkle-root-uploader.rs"
+
+[[bin]]
+name = "solana-claim-mev-tips"
+path = "src/bin/claim-mev-tips.rs"
diff --git a/tip-distributor/README.md b/tip-distributor/README.md
new file mode 100644
index 0000000000..fec682879a
--- /dev/null
+++ b/tip-distributor/README.md
@@ -0,0 +1,52 @@
+# Tip Distributor
+This library and collection of binaries are responsible for generating and uploading merkle roots to the on-chain
+tip-distribution program found [here](https://github.com/jito-foundation/jito-programs/blob/submodule/tip-payment/programs/tip-distribution/src/lib.rs).
+
+## Background
+Each validator is assigned a new PDA per epoch where its share of tips, in lamports, is stored.
+At the end of the epoch, validators are expected to take a commission and then distribute the rest of the funds
+to their delegators, so that delegators receive rewards proportional to their respective delegations. The distribution
+mechanism uses merkle proofs, similar to how airdrops work.
+
+The merkle roots are calculated off-chain and uploaded to the validator's **TipDistributionAccount** PDA. Validators may
+elect an account to upload the merkle roots on their behalf. Once uploaded, users can invoke the **claim** instruction
+and receive the rewards they're entitled to. Once all funds are claimed by users, the validator can close the account and
+be refunded the rent.
+
+## Scripts
+
+### stake-meta-generator
+
+This script generates a JSON file identifying individual stake delegations to each validator, along with the amount of
+lamports in each validator's **TipDistributionAccount**. All validators are included in the JSON list, regardless of
+whether they participate in the system; a participant is a validator running the jito-solana client to accept tips that
+has initialized a **TipDistributionAccount** PDA for the epoch.
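For orientation, here is a minimal, hypothetical sketch (not part of this patch) of consuming the stake-meta JSON that stake-meta-generator emits. It uses `read_json_from_file` and the `StakeMetaCollection` type defined in `tip-distributor/src/lib.rs` later in this diff; the file name is illustrative:

use {
    solana_tip_distributor::{read_json_from_file, StakeMetaCollection},
    std::path::PathBuf,
};

fn main() {
    // Illustrative path: stake-meta-generator writes this file via --out-path.
    let coll: StakeMetaCollection =
        read_json_from_file(&PathBuf::from("stake-meta.json")).expect("parse stake meta JSON");
    for meta in &coll.stake_metas {
        // Non-participants still appear in the list, but carry no tip-distribution meta.
        if let Some(tda_meta) = &meta.maybe_tip_distribution_meta {
            println!(
                "{}: {} lamports of tips across {} delegations",
                meta.validator_vote_account,
                tda_meta.total_tips,
                meta.delegations.len()
            );
        }
    }
}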
+
+One edge case we account for: the last validator in epoch N may receive tips that don't get transferred
+into the PDA until some slot in epoch N + 1. Because of this, we cannot rely on the bank's state at epoch N for the
+lamport amounts in the PDAs. We use the bank solely to take a snapshot of delegations, and an RPC node to fetch the
+PDA lamports for more up-to-date data.
+
+### merkle-root-generator
+This script accepts a path to the above JSON file as one of its arguments and generates the merkle roots, written to a JSON file.
+
+### merkle-root-uploader
+Uploads the merkle roots on-chain.
+
+### claim-mev-tips
+This reads the file output by `merkle-root-generator`, finds all accounts eligible to receive MEV tips, and creates
+and sends claim transactions to the RPC server.
+
+
+## How it works
+To use this library as the merkle root creator, follow these steps:
+1. Download a ledger snapshot containing the slot of interest, i.e. the last slot in an epoch. The Solana foundation has snapshots that can be found [here](https://console.cloud.google.com/storage/browser/mainnet-beta-ledger-us-ny5).
+2. Place the snapshot on your worker machine (where these scripts will run).
+3. Run `solana-ledger-tool -l ${PATH_TO_LEDGER} create-snapshot ${YOUR_SLOT} ${WHERE_TO_CREATE_SNAPSHOT}`
+   1. The snapshot created at `${WHERE_TO_CREATE_SNAPSHOT}` will have the highest slot of `${YOUR_SLOT}`, assuming you downloaded the correct snapshot.
+4. Run `stake-meta-generator --ledger-path ${WHERE_TO_CREATE_SNAPSHOT} --tip-distribution-program-id ${PUBKEY} --out-path ${JSON_OUT_PATH} --snapshot-slot ${SLOT} --rpc-url ${URL}`
+   1. Note: `${WHERE_TO_CREATE_SNAPSHOT}` must be the same in steps 3 & 4.
+5. Run `merkle-root-generator --stake-meta-coll-path ${STAKE_META_COLLECTION_JSON} --rpc-url ${URL} --out-path ${MERKLE_ROOT_PATH}`
+6. Run `merkle-root-uploader --merkle-root-path ${MERKLE_ROOT_PATH} --keypair-path ${KEYPAIR_PATH} --rpc-url ${URL} --tip-distribution-program-id ${PROGRAM_ID}`
+7. Run `solana-claim-mev-tips --merkle-trees-path /solana/ledger/autosnapshot/merkle-tree-221615999.json --rpc-url ${URL} --tip-distribution-program-id ${PROGRAM_ID} --keypair-path ${KEYPAIR_PATH}`
+
+Voila!
diff --git a/tip-distributor/src/bin/claim-mev-tips.rs b/tip-distributor/src/bin/claim-mev-tips.rs
new file mode 100644
index 0000000000..dd57db9231
--- /dev/null
+++ b/tip-distributor/src/bin/claim-mev-tips.rs
@@ -0,0 +1,190 @@
+//! This binary claims MEV tips.
+use {
+    clap::Parser,
+    futures::future::join_all,
+    gethostname::gethostname,
+    log::*,
+    solana_metrics::{datapoint_error, datapoint_info, set_host_id},
+    solana_sdk::{
+        pubkey::Pubkey,
+        signature::{read_keypair_file, Keypair},
+    },
+    solana_tip_distributor::{
+        claim_mev_workflow::{claim_mev_tips, ClaimMevError},
+        read_json_from_file,
+        reclaim_rent_workflow::reclaim_rent,
+        GeneratedMerkleTreeCollection,
+    },
+    std::{
+        path::PathBuf,
+        sync::Arc,
+        time::{Duration, Instant},
+    },
+};
+
+#[derive(Parser, Debug)]
+#[command(author, version, about, long_about = None)]
+struct Args {
+    /// Path to JSON file containing the [GeneratedMerkleTreeCollection] object.
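+    // This file is the output of merkle-root-generator (see tip-distributor/README.md).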
+    #[arg(long, env)]
+    merkle_trees_path: PathBuf,
+
+    /// RPC to send transactions through
+    #[arg(long, env, default_value = "http://localhost:8899")]
+    rpc_url: String,
+
+    /// Tip distribution program ID
+    #[arg(long, env)]
+    tip_distribution_program_id: Pubkey,
+
+    /// Path to keypair
+    #[arg(long, env)]
+    keypair_path: PathBuf,
+
+    /// Limits how long the send loop runs before stopping
+    #[arg(long, env, default_value_t = 60 * 60)]
+    max_retry_duration_secs: u64,
+
+    /// Specifies whether to reclaim any rent.
+    #[arg(long, env, default_value_t = true)]
+    should_reclaim_rent: bool,
+
+    /// Specifies whether to reclaim rent on behalf of validators from respective TDAs.
+    #[arg(long, env)]
+    should_reclaim_tdas: bool,
+
+    /// Compute unit price, in micro-lamports, used as the priority fee
+    #[arg(long, env, default_value_t = 1)]
+    micro_lamports: u64,
+}
+
+async fn start_mev_claim_process(
+    merkle_trees: GeneratedMerkleTreeCollection,
+    rpc_url: String,
+    tip_distribution_program_id: Pubkey,
+    signer: Arc<Keypair>,
+    max_loop_duration: Duration,
+    micro_lamports: u64,
+) -> Result<(), ClaimMevError> {
+    let start = Instant::now();
+
+    match claim_mev_tips(
+        &merkle_trees,
+        rpc_url,
+        tip_distribution_program_id,
+        signer,
+        max_loop_duration,
+        micro_lamports,
+    )
+    .await
+    {
+        Err(e) => {
+            datapoint_error!(
+                "claim_mev_workflow-claim_error",
+                ("epoch", merkle_trees.epoch, i64),
+                ("error", 1, i64),
+                ("err_str", e.to_string(), String),
+                ("elapsed_us", start.elapsed().as_micros(), i64),
+            );
+            Err(e)
+        }
+        Ok(()) => {
+            datapoint_info!(
+                "claim_mev_workflow-claim_completion",
+                ("epoch", merkle_trees.epoch, i64),
+                ("elapsed_us", start.elapsed().as_micros(), i64),
+            );
+            Ok(())
+        }
+    }
+}
+
+async fn start_rent_claim(
+    rpc_url: String,
+    tip_distribution_program_id: Pubkey,
+    signer: Arc<Keypair>,
+    max_loop_duration: Duration,
+    should_reclaim_tdas: bool,
+    micro_lamports: u64,
+    epoch: u64,
+) -> Result<(), ClaimMevError> {
+    let start = Instant::now();
+    match reclaim_rent(
+        rpc_url,
+        tip_distribution_program_id,
+        signer,
+        max_loop_duration,
+        should_reclaim_tdas,
+        micro_lamports,
+    )
+    .await
+    {
+        Err(e) => {
+            datapoint_error!(
+                "claim_mev_workflow-reclaim_rent_error",
+                ("epoch", epoch, i64),
+                ("error", 1, i64),
+                ("err_str", e.to_string(), String),
+                ("elapsed_us", start.elapsed().as_micros(), i64),
+            );
+            Err(e)
+        }
+        Ok(()) => {
+            datapoint_info!(
+                "claim_mev_workflow-reclaim_rent_completion",
+                ("epoch", epoch, i64),
+                ("elapsed_us", start.elapsed().as_micros(), i64),
+            );
+            Ok(())
+        }
+    }
+}
+
+#[tokio::main]
+async fn main() -> Result<(), ClaimMevError> {
+    env_logger::init();
+
+    gethostname()
+        .into_string()
+        .map(set_host_id)
+        .expect("set hostname");
+
+    let args: Args = Args::parse();
+    let keypair = Arc::new(read_keypair_file(&args.keypair_path).expect("read keypair file"));
+    let merkle_trees: GeneratedMerkleTreeCollection =
+        read_json_from_file(&args.merkle_trees_path).expect("read GeneratedMerkleTreeCollection");
+    let max_loop_duration = Duration::from_secs(args.max_retry_duration_secs);
+
+    info!(
+        "Starting to claim mev tips for epoch: {}",
+        merkle_trees.epoch
+    );
+    let epoch = merkle_trees.epoch;
+
+    let mut futs = vec![];
+    futs.push(tokio::spawn(start_mev_claim_process(
+        merkle_trees,
+        args.rpc_url.clone(),
+        args.tip_distribution_program_id,
+        keypair.clone(),
+        max_loop_duration,
+        args.micro_lamports,
+    )));
+    if args.should_reclaim_rent {
+        futs.push(tokio::spawn(start_rent_claim(
+            args.rpc_url.clone(),
+            args.tip_distribution_program_id,
+            keypair.clone(),
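+            // rent reclaim runs concurrently with the claim task spawned above,
+            // sharing the same payer keypair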
+            max_loop_duration,
+            args.should_reclaim_tdas,
+            args.micro_lamports,
+            epoch,
+        )));
+    }
+    let results = join_all(futs).await;
+    solana_metrics::flush(); // sometimes the last datapoint doesn't get emitted; this increases the likelihood it does
+    for r in results {
+        r.map_err(|e| ClaimMevError::UncaughtError { e: e.to_string() })??;
+    }
+    Ok(())
+}
diff --git a/tip-distributor/src/bin/merkle-root-generator.rs b/tip-distributor/src/bin/merkle-root-generator.rs
new file mode 100644
index 0000000000..9f2d0f9a4e
--- /dev/null
+++ b/tip-distributor/src/bin/merkle-root-generator.rs
@@ -0,0 +1,34 @@
+//! This binary generates a merkle tree for each [TipDistributionAccount]; they are derived
+//! using a user-provided [StakeMetaCollection] JSON file.
+
+use {
+    clap::Parser, log::*,
+    solana_tip_distributor::merkle_root_generator_workflow::generate_merkle_root,
+    std::path::PathBuf,
+};
+
+#[derive(Parser, Debug)]
+#[command(author, version, about, long_about = None)]
+struct Args {
+    /// Path to JSON file containing the [StakeMetaCollection] object.
+    #[arg(long, env)]
+    stake_meta_coll_path: PathBuf,
+
+    /// RPC to send transactions through. Used to validate that what's being claimed equals the TDA balance minus rent.
+    #[arg(long, env)]
+    rpc_url: String,
+
+    /// Path to JSON file to get populated with tree node data.
+    #[arg(long, env)]
+    out_path: PathBuf,
+}
+
+fn main() {
+    env_logger::init();
+    info!("Starting merkle-root-generator workflow...");
+
+    let args: Args = Args::parse();
+    generate_merkle_root(&args.stake_meta_coll_path, &args.out_path, &args.rpc_url)
+        .expect("merkle tree produced");
+    info!("saved merkle roots to {:?}", args.out_path);
+}
diff --git a/tip-distributor/src/bin/merkle-root-uploader.rs b/tip-distributor/src/bin/merkle-root-uploader.rs
new file mode 100644
index 0000000000..9000ce66d0
--- /dev/null
+++ b/tip-distributor/src/bin/merkle-root-uploader.rs
@@ -0,0 +1,54 @@
+use {
+    clap::Parser, log::info, solana_sdk::pubkey::Pubkey,
+    solana_tip_distributor::merkle_root_upload_workflow::upload_merkle_root, std::path::PathBuf,
+};
+
+#[derive(Parser, Debug)]
+#[command(author, version, about, long_about = None)]
+struct Args {
+    /// Path to JSON file containing the [GeneratedMerkleTreeCollection] object produced by merkle-root-generator.
+    #[arg(long, env)]
+    merkle_root_path: PathBuf,
+
+    /// The path to the keypair used to sign and pay for the `upload_merkle_root` transactions.
+    #[arg(long, env)]
+    keypair_path: PathBuf,
+
+    /// The RPC to send transactions to.
+    #[arg(long, env)]
+    rpc_url: String,
+
+    /// Tip distribution program ID
+    #[arg(long, env)]
+    tip_distribution_program_id: Pubkey,
+
+    /// Rate-limits the maximum number of requests per RPC connection
+    #[arg(long, env, default_value_t = 100)]
+    max_concurrent_rpc_get_reqs: usize,
+
+    /// Number of transactions to send to RPC at a time.
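+    // Transactions are signed and sent in batches of this size (see
+    // sign_and_send_transactions_with_retries in src/lib.rs).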
+ #[arg(long, env, default_value_t = 64)] + txn_send_batch_size: usize, +} + +fn main() { + env_logger::init(); + + let args: Args = Args::parse(); + + info!("starting merkle root uploader..."); + if let Err(e) = upload_merkle_root( + &args.merkle_root_path, + &args.keypair_path, + &args.rpc_url, + &args.tip_distribution_program_id, + args.max_concurrent_rpc_get_reqs, + args.txn_send_batch_size, + ) { + panic!("failed to upload merkle roots: {:?}", e); + } + info!( + "uploaded merkle roots from file {:?}", + args.merkle_root_path + ); +} diff --git a/tip-distributor/src/bin/stake-meta-generator.rs b/tip-distributor/src/bin/stake-meta-generator.rs new file mode 100644 index 0000000000..be7993be02 --- /dev/null +++ b/tip-distributor/src/bin/stake-meta-generator.rs @@ -0,0 +1,67 @@ +//! This binary is responsible for generating a JSON file that contains meta-data about stake +//! & delegations given a ledger snapshot directory. The JSON file is structured as an array +//! of [StakeMeta] objects. + +use { + clap::Parser, + log::*, + solana_sdk::{clock::Slot, pubkey::Pubkey}, + solana_tip_distributor::{self, stake_meta_generator_workflow::generate_stake_meta}, + std::{ + fs::{self}, + path::PathBuf, + process::exit, + }, +}; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Ledger path, where you created the snapshot. + #[arg(long, env, value_parser = Args::ledger_path_parser)] + ledger_path: PathBuf, + + /// The tip-distribution program id. + #[arg(long, env)] + tip_distribution_program_id: Pubkey, + + /// The tip-payment program id. + #[arg(long, env)] + tip_payment_program_id: Pubkey, + + /// Path to JSON file populated with the [StakeMetaCollection] object. + #[arg(long, env)] + out_path: String, + + /// The expected snapshot slot. 
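+    // Should match the slot passed to `solana-ledger-tool create-snapshot` (step 3 in the README).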
+ #[arg(long, env)] + snapshot_slot: Slot, +} + +impl Args { + fn ledger_path_parser(ledger_path: &str) -> Result { + Ok(fs::canonicalize(ledger_path).unwrap_or_else(|err| { + error!("Unable to access ledger path '{}': {}", ledger_path, err); + exit(1); + })) + } +} + +fn main() { + env_logger::init(); + info!("Starting stake-meta-generator..."); + + let args: Args = Args::parse(); + + if let Err(e) = generate_stake_meta( + &args.ledger_path, + &args.snapshot_slot, + &args.tip_distribution_program_id, + &args.out_path, + &args.tip_payment_program_id, + ) { + error!("error producing stake-meta: {:?}", e); + } else { + info!("produced stake meta"); + } +} diff --git a/tip-distributor/src/claim_mev_workflow.rs b/tip-distributor/src/claim_mev_workflow.rs new file mode 100644 index 0000000000..929b64fe3b --- /dev/null +++ b/tip-distributor/src/claim_mev_workflow.rs @@ -0,0 +1,398 @@ +use { + crate::{send_until_blockhash_expires, GeneratedMerkleTreeCollection}, + anchor_lang::{AccountDeserialize, InstructionData, ToAccountMetas}, + itertools::Itertools, + jito_tip_distribution::state::{ClaimStatus, Config, TipDistributionAccount}, + log::{error, info, warn}, + rand::{prelude::SliceRandom, thread_rng}, + solana_client::nonblocking::rpc_client::RpcClient, + solana_metrics::datapoint_info, + solana_program::{ + fee_calculator::DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE, native_token::LAMPORTS_PER_SOL, + system_program, + }, + solana_rpc_client_api::config::RpcSimulateTransactionConfig, + solana_sdk::{ + account::Account, + commitment_config::CommitmentConfig, + compute_budget::ComputeBudgetInstruction, + instruction::Instruction, + pubkey::Pubkey, + signature::{Keypair, Signer}, + transaction::Transaction, + }, + std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, + }, + thiserror::Error, +}; + +#[derive(Error, Debug)] +pub enum ClaimMevError { + #[error(transparent)] + IoError(#[from] std::io::Error), + + #[error(transparent)] + JsonError(#[from] serde_json::Error), + + #[error(transparent)] + AnchorError(anchor_lang::error::Error), + + #[error(transparent)] + RpcError(#[from] solana_rpc_client_api::client_error::Error), + + #[error("Expected to have at least {desired_balance} lamports in {payer:?}. Current balance is {start_balance} lamports. Deposit {sol_to_deposit} SOL to continue.")] + InsufficientBalance { + desired_balance: u64, + payer: Pubkey, + start_balance: u64, + sol_to_deposit: u64, + }, + + #[error("Not finished with job, transactions left {transactions_left}")] + NotFinished { transactions_left: usize }, + + #[error("UncaughtError {e:?}")] + UncaughtError { e: String }, +} + +pub async fn get_claim_transactions_for_valid_unclaimed( + rpc_client: &RpcClient, + merkle_trees: &GeneratedMerkleTreeCollection, + tip_distribution_program_id: Pubkey, + micro_lamports: u64, + payer_pubkey: Pubkey, +) -> Result, ClaimMevError> { + let tree_nodes = merkle_trees + .generated_merkle_trees + .iter() + .flat_map(|tree| &tree.tree_nodes) + .collect_vec(); + + info!( + "reading tip distribution related accounts for epoch {}", + merkle_trees.epoch + ); + + let start = Instant::now(); + + let tda_pubkeys = merkle_trees + .generated_merkle_trees + .iter() + .map(|tree| tree.tip_distribution_account) + .collect_vec(); + let tdas: HashMap = crate::get_batched_accounts(rpc_client, &tda_pubkeys) + .await? 
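+        // get_multiple_accounts yields None for accounts that don't exist; the
+        // filter_map below drops those entries.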
+ .into_iter() + .filter_map(|(pubkey, a)| Some((pubkey, a?))) + .collect(); + + let claimant_pubkeys = tree_nodes + .iter() + .map(|tree_node| tree_node.claimant) + .collect_vec(); + let claimants: HashMap = + crate::get_batched_accounts(rpc_client, &claimant_pubkeys) + .await? + .into_iter() + .filter_map(|(pubkey, a)| Some((pubkey, a?))) + .collect(); + + let claim_status_pubkeys = tree_nodes + .iter() + .map(|tree_node| tree_node.claim_status_pubkey) + .collect_vec(); + let claim_statuses: HashMap = + crate::get_batched_accounts(rpc_client, &claim_status_pubkeys) + .await? + .into_iter() + .filter_map(|(pubkey, a)| Some((pubkey, a?))) + .collect(); + + let elapsed_us = start.elapsed().as_micros(); + + // can be helpful for determining mismatch in state between requested and read + datapoint_info!( + "claim_mev-get_claim_transactions_account_data", + ("elapsed_us", elapsed_us, i64), + ("tdas", tda_pubkeys.len(), i64), + ("tdas_onchain", tdas.len(), i64), + ("claimants", claimant_pubkeys.len(), i64), + ("claimants_onchain", claimants.len(), i64), + ("claim_statuses", claim_status_pubkeys.len(), i64), + ("claim_statuses_onchain", claim_statuses.len(), i64), + ); + + let transactions = build_mev_claim_transactions( + tip_distribution_program_id, + merkle_trees, + tdas, + claimants, + claim_statuses, + micro_lamports, + payer_pubkey, + ); + + Ok(transactions) +} + +pub async fn claim_mev_tips( + merkle_trees: &GeneratedMerkleTreeCollection, + rpc_url: String, + tip_distribution_program_id: Pubkey, + keypair: Arc, + max_loop_duration: Duration, + micro_lamports: u64, +) -> Result<(), ClaimMevError> { + let rpc_client = RpcClient::new_with_timeout_and_commitment( + rpc_url, + Duration::from_secs(300), + CommitmentConfig::confirmed(), + ); + + let start = Instant::now(); + while start.elapsed() <= max_loop_duration { + let mut all_claim_transactions = get_claim_transactions_for_valid_unclaimed( + &rpc_client, + merkle_trees, + tip_distribution_program_id, + micro_lamports, + keypair.pubkey(), + ) + .await?; + + datapoint_info!( + "claim_mev_tips-send_summary", + ("claim_transactions_left", all_claim_transactions.len(), i64), + ); + + if all_claim_transactions.is_empty() { + return Ok(()); + } + + all_claim_transactions.shuffle(&mut thread_rng()); + let transactions: Vec<_> = all_claim_transactions.into_iter().take(10_000).collect(); + + // only check balance for the ones we need to currently send since reclaim rent running in parallel + if let Some((start_balance, desired_balance, sol_to_deposit)) = + is_sufficient_balance(&keypair.pubkey(), &rpc_client, transactions.len() as u64).await + { + return Err(ClaimMevError::InsufficientBalance { + desired_balance, + payer: keypair.pubkey(), + start_balance, + sol_to_deposit, + }); + } + + let blockhash = rpc_client.get_latest_blockhash().await?; + let _ = send_until_blockhash_expires(&rpc_client, transactions, blockhash, &keypair).await; + } + + let transactions = get_claim_transactions_for_valid_unclaimed( + &rpc_client, + merkle_trees, + tip_distribution_program_id, + micro_lamports, + keypair.pubkey(), + ) + .await?; + if transactions.is_empty() { + return Ok(()); + } + + // if more transactions left, we'll simulate them all to make sure its not an uncaught error + let mut is_error = false; + let mut error_str = String::new(); + for tx in &transactions { + match rpc_client + .simulate_transaction_with_config( + tx, + RpcSimulateTransactionConfig { + sig_verify: false, + replace_recent_blockhash: true, + commitment: 
Some(CommitmentConfig::processed()), + ..RpcSimulateTransactionConfig::default() + }, + ) + .await + { + Ok(_) => {} + Err(e) => { + error_str = e.to_string(); + is_error = true; + + match e.get_transaction_error() { + None => { + break; + } + Some(e) => { + warn!("transaction error. tx: {:?} error: {:?}", tx, e); + break; + } + } + } + } + } + + if is_error { + Err(ClaimMevError::UncaughtError { e: error_str }) + } else { + Err(ClaimMevError::NotFinished { + transactions_left: transactions.len(), + }) + } +} + +/// Returns a list of claim transactions for valid, unclaimed MEV tips +/// A valid, unclaimed transaction consists of the following: +/// - there must be lamports to claim for the tip distribution account. +/// - there must be a merkle root. +/// - the claimant (typically a stake account) must exist. +/// - the claimant (typically a stake account) must have a non-zero amount of tips to claim +/// - the claimant must have enough lamports post-claim to be rent-exempt. +/// - note: there aren't any rent exempt accounts on solana mainnet anymore. +/// - it must not have already been claimed. +fn build_mev_claim_transactions( + tip_distribution_program_id: Pubkey, + merkle_trees: &GeneratedMerkleTreeCollection, + tdas: HashMap, + claimants: HashMap, + claim_status: HashMap, + micro_lamports: u64, + payer_pubkey: Pubkey, +) -> Vec { + let tip_distribution_accounts: HashMap = tdas + .iter() + .filter_map(|(pubkey, account)| { + Some(( + *pubkey, + TipDistributionAccount::try_deserialize(&mut account.data.as_slice()).ok()?, + )) + }) + .collect(); + + let claim_statuses: HashMap = claim_status + .iter() + .filter_map(|(pubkey, account)| { + Some(( + *pubkey, + ClaimStatus::try_deserialize(&mut account.data.as_slice()).ok()?, + )) + }) + .collect(); + + datapoint_info!( + "build_mev_claim_transactions", + ( + "tip_distribution_accounts", + tip_distribution_accounts.len(), + i64 + ), + ("claim_statuses", claim_statuses.len(), i64), + ); + + let tip_distribution_config = + Pubkey::find_program_address(&[Config::SEED], &tip_distribution_program_id).0; + + let mut instructions = Vec::with_capacity(claimants.len()); + for tree in &merkle_trees.generated_merkle_trees { + if tree.max_total_claim == 0 { + continue; + } + + // if unwrap panics, there's a bug in the merkle tree code because the merkle tree code relies on the state + // of the chain to claim. 
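+        // A lookup can only fail here if the TDA was missing on-chain when fetched
+        // above (such entries were dropped by the filter_map).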
+ let tip_distribution_account = tip_distribution_accounts + .get(&tree.tip_distribution_account) + .unwrap(); + + // can continue here, as there might be tip distribution accounts this account doesn't upload for + if tip_distribution_account.merkle_root.is_none() { + continue; + } + + for node in &tree.tree_nodes { + // doesn't make sense to claim for claimants that don't exist anymore + // can't claim for something already claimed + // don't need to claim for claimants that get 0 MEV + if claimants.get(&node.claimant).is_none() + || claim_statuses.get(&node.claim_status_pubkey).is_some() + || node.amount == 0 + { + continue; + } + + instructions.push(Instruction { + program_id: tip_distribution_program_id, + data: jito_tip_distribution::instruction::Claim { + proof: node.proof.clone().unwrap(), + amount: node.amount, + bump: node.claim_status_bump, + } + .data(), + accounts: jito_tip_distribution::accounts::Claim { + config: tip_distribution_config, + tip_distribution_account: tree.tip_distribution_account, + claimant: node.claimant, + claim_status: node.claim_status_pubkey, + payer: payer_pubkey, + system_program: system_program::id(), + } + .to_account_metas(None), + }); + } + } + + // TODO (LB): see if we can do >1 claim here + let transactions: Vec = instructions + .into_iter() + .map(|claim_ix| { + let priority_fee_ix = ComputeBudgetInstruction::set_compute_unit_price(micro_lamports); + Transaction::new_with_payer(&[priority_fee_ix, claim_ix], Some(&payer_pubkey)) + }) + .collect(); + + transactions +} + +/// heuristic to make sure we have enough funds to cover the rent costs if epoch has many validators +/// If insufficient funds, returns start balance, desired balance, and amount of sol to deposit +async fn is_sufficient_balance( + payer: &Pubkey, + rpc_client: &RpcClient, + instruction_count: u64, +) -> Option<(u64, u64, u64)> { + let start_balance = rpc_client + .get_balance(payer) + .await + .expect("Failed to get starting balance"); + // most amounts are for 0 lamports. 
had 1736 non-zero claims out of 164742 + let min_rent_per_claim = rpc_client + .get_minimum_balance_for_rent_exemption(ClaimStatus::SIZE) + .await + .expect("Failed to calculate min rent"); + let desired_balance = instruction_count + .checked_mul( + min_rent_per_claim + .checked_add(DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE) + .unwrap(), + ) + .unwrap(); + if start_balance < desired_balance { + let sol_to_deposit = desired_balance + .checked_sub(start_balance) + .unwrap() + .checked_add(LAMPORTS_PER_SOL) + .unwrap() + .checked_sub(1) + .unwrap() + .checked_div(LAMPORTS_PER_SOL) + .unwrap(); // rounds up to nearest sol + Some((start_balance, desired_balance, sol_to_deposit)) + } else { + None + } +} diff --git a/tip-distributor/src/lib.rs b/tip-distributor/src/lib.rs new file mode 100644 index 0000000000..8ee6b50f5d --- /dev/null +++ b/tip-distributor/src/lib.rs @@ -0,0 +1,1062 @@ +pub mod claim_mev_workflow; +pub mod merkle_root_generator_workflow; +pub mod merkle_root_upload_workflow; +pub mod reclaim_rent_workflow; +pub mod stake_meta_generator_workflow; + +use { + crate::{ + merkle_root_generator_workflow::MerkleRootGeneratorError, + stake_meta_generator_workflow::StakeMetaGeneratorError::CheckedMathError, + }, + anchor_lang::Id, + jito_tip_distribution::{ + program::JitoTipDistribution, + state::{ClaimStatus, TipDistributionAccount}, + }, + jito_tip_payment::{ + Config, CONFIG_ACCOUNT_SEED, TIP_ACCOUNT_SEED_0, TIP_ACCOUNT_SEED_1, TIP_ACCOUNT_SEED_2, + TIP_ACCOUNT_SEED_3, TIP_ACCOUNT_SEED_4, TIP_ACCOUNT_SEED_5, TIP_ACCOUNT_SEED_6, + TIP_ACCOUNT_SEED_7, + }, + log::*, + serde::{de::DeserializeOwned, Deserialize, Serialize}, + solana_client::{ + nonblocking::rpc_client::RpcClient, + rpc_client::{RpcClient as SyncRpcClient, SerializableTransaction}, + }, + solana_merkle_tree::MerkleTree, + solana_metrics::{datapoint_error, datapoint_warn}, + solana_program::{ + instruction::InstructionError, + rent::{ + ACCOUNT_STORAGE_OVERHEAD, DEFAULT_EXEMPTION_THRESHOLD, DEFAULT_LAMPORTS_PER_BYTE_YEAR, + }, + }, + solana_rpc_client_api::{ + client_error::{Error, ErrorKind}, + config::RpcSendTransactionConfig, + request::{RpcError, RpcResponseErrorData, MAX_MULTIPLE_ACCOUNTS}, + response::RpcSimulateTransactionResult, + }, + solana_sdk::{ + account::{Account, AccountSharedData, ReadableAccount}, + clock::Slot, + commitment_config::{CommitmentConfig, CommitmentLevel}, + hash::{Hash, Hasher}, + pubkey::Pubkey, + signature::{Keypair, Signature}, + stake_history::Epoch, + transaction::{ + Transaction, + TransactionError::{self}, + }, + }, + solana_transaction_status::TransactionStatus, + std::{ + collections::{HashMap, HashSet}, + fs::File, + io::BufReader, + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, + }, + tokio::{sync::Semaphore, time::sleep}, +}; + +#[derive(Clone, Deserialize, Serialize, Debug)] +pub struct GeneratedMerkleTreeCollection { + pub generated_merkle_trees: Vec, + pub bank_hash: String, + pub epoch: Epoch, + pub slot: Slot, +} + +#[derive(Clone, Eq, Debug, Hash, PartialEq, Deserialize, Serialize)] +pub struct GeneratedMerkleTree { + #[serde(with = "pubkey_string_conversion")] + pub tip_distribution_account: Pubkey, + #[serde(with = "pubkey_string_conversion")] + pub merkle_root_upload_authority: Pubkey, + pub merkle_root: Hash, + pub tree_nodes: Vec, + pub max_total_claim: u64, + pub max_num_nodes: u64, +} + +pub struct TipPaymentPubkeys { + config_pda: Pubkey, + tip_pdas: Vec, +} + +fn emit_inconsistent_tree_node_amount_dp( + tree_nodes: &[TreeNode], + tip_distribution_account: 
&Pubkey, + rpc_client: &SyncRpcClient, +) { + let actual_claims: u64 = tree_nodes.iter().map(|t| t.amount).sum(); + let tda = rpc_client.get_account(tip_distribution_account).unwrap(); + let min_rent = rpc_client + .get_minimum_balance_for_rent_exemption(tda.data.len()) + .unwrap(); + + let expected_claims = tda.lamports.checked_sub(min_rent).unwrap(); + if actual_claims == expected_claims { + return; + } + + if actual_claims > expected_claims { + datapoint_error!( + "tip-distributor", + ( + "actual_claims_exceeded", + format!("tip_distribution_account={tip_distribution_account},actual_claims={actual_claims}, expected_claims={expected_claims}"), + String + ), + ); + } else { + datapoint_warn!( + "tip-distributor", + ( + "actual_claims_below", + format!("tip_distribution_account={tip_distribution_account},actual_claims={actual_claims}, expected_claims={expected_claims}"), + String + ), + ); + } +} + +impl GeneratedMerkleTreeCollection { + pub fn new_from_stake_meta_collection( + stake_meta_coll: StakeMetaCollection, + maybe_rpc_client: Option, + ) -> Result { + let generated_merkle_trees = stake_meta_coll + .stake_metas + .into_iter() + .filter(|stake_meta| stake_meta.maybe_tip_distribution_meta.is_some()) + .filter_map(|stake_meta| { + let mut tree_nodes = match TreeNode::vec_from_stake_meta(&stake_meta) { + Err(e) => return Some(Err(e)), + Ok(maybe_tree_nodes) => maybe_tree_nodes, + }?; + + if let Some(rpc_client) = &maybe_rpc_client { + if let Some(tda) = stake_meta.maybe_tip_distribution_meta.as_ref() { + emit_inconsistent_tree_node_amount_dp( + &tree_nodes[..], + &tda.tip_distribution_pubkey, + rpc_client, + ); + } + } + + let hashed_nodes: Vec<[u8; 32]> = + tree_nodes.iter().map(|n| n.hash().to_bytes()).collect(); + + let tip_distribution_meta = stake_meta.maybe_tip_distribution_meta.unwrap(); + + let merkle_tree = MerkleTree::new(&hashed_nodes[..], true); + let max_num_nodes = tree_nodes.len() as u64; + + for (i, tree_node) in tree_nodes.iter_mut().enumerate() { + tree_node.proof = Some(get_proof(&merkle_tree, i)); + } + + Some(Ok(GeneratedMerkleTree { + max_num_nodes, + tip_distribution_account: tip_distribution_meta.tip_distribution_pubkey, + merkle_root_upload_authority: tip_distribution_meta + .merkle_root_upload_authority, + merkle_root: *merkle_tree.get_root().unwrap(), + tree_nodes, + max_total_claim: tip_distribution_meta.total_tips, + })) + }) + .collect::, MerkleRootGeneratorError>>()?; + + Ok(GeneratedMerkleTreeCollection { + generated_merkle_trees, + bank_hash: stake_meta_coll.bank_hash, + epoch: stake_meta_coll.epoch, + slot: stake_meta_coll.slot, + }) + } +} + +pub fn get_proof(merkle_tree: &MerkleTree, i: usize) -> Vec<[u8; 32]> { + let mut proof = Vec::new(); + let path = merkle_tree.find_path(i).expect("path to index"); + for branch in path.get_proof_entries() { + if let Some(hash) = branch.get_left_sibling() { + proof.push(hash.to_bytes()); + } else if let Some(hash) = branch.get_right_sibling() { + proof.push(hash.to_bytes()); + } else { + panic!("expected some hash at each level of the tree"); + } + } + proof +} + +fn derive_tip_payment_pubkeys(program_id: &Pubkey) -> TipPaymentPubkeys { + let config_pda = Pubkey::find_program_address(&[CONFIG_ACCOUNT_SEED], program_id).0; + let tip_pda_0 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_0], program_id).0; + let tip_pda_1 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_1], program_id).0; + let tip_pda_2 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_2], program_id).0; + let tip_pda_3 = 
Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_3], program_id).0; + let tip_pda_4 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_4], program_id).0; + let tip_pda_5 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_5], program_id).0; + let tip_pda_6 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_6], program_id).0; + let tip_pda_7 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_7], program_id).0; + + TipPaymentPubkeys { + config_pda, + tip_pdas: vec![ + tip_pda_0, tip_pda_1, tip_pda_2, tip_pda_3, tip_pda_4, tip_pda_5, tip_pda_6, tip_pda_7, + ], + } +} + +#[derive(Clone, Eq, Debug, Hash, PartialEq, Deserialize, Serialize)] +pub struct TreeNode { + /// The stake account entitled to redeem. + #[serde(with = "pubkey_string_conversion")] + pub claimant: Pubkey, + + /// Pubkey of the ClaimStatus PDA account, this account should be closed to reclaim rent. + #[serde(with = "pubkey_string_conversion")] + pub claim_status_pubkey: Pubkey, + + /// Bump of the ClaimStatus PDA account + pub claim_status_bump: u8, + + #[serde(with = "pubkey_string_conversion")] + pub staker_pubkey: Pubkey, + + #[serde(with = "pubkey_string_conversion")] + pub withdrawer_pubkey: Pubkey, + + /// The amount this account is entitled to. + pub amount: u64, + + /// The proof associated with this TreeNode + pub proof: Option>, +} + +impl TreeNode { + fn vec_from_stake_meta( + stake_meta: &StakeMeta, + ) -> Result>, MerkleRootGeneratorError> { + if let Some(tip_distribution_meta) = stake_meta.maybe_tip_distribution_meta.as_ref() { + let validator_amount = (tip_distribution_meta.total_tips as u128) + .checked_mul(tip_distribution_meta.validator_fee_bps as u128) + .unwrap() + .checked_div(10_000) + .unwrap() as u64; + let (claim_status_pubkey, claim_status_bump) = Pubkey::find_program_address( + &[ + ClaimStatus::SEED, + &stake_meta.validator_vote_account.to_bytes(), + &tip_distribution_meta.tip_distribution_pubkey.to_bytes(), + ], + &JitoTipDistribution::id(), + ); + let mut tree_nodes = vec![TreeNode { + claimant: stake_meta.validator_vote_account, + claim_status_pubkey, + claim_status_bump, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: validator_amount, + proof: None, + }]; + + let remaining_total_rewards = tip_distribution_meta + .total_tips + .checked_sub(validator_amount) + .unwrap() as u128; + + let total_delegated = stake_meta.total_delegated as u128; + tree_nodes.extend( + stake_meta + .delegations + .iter() + .map(|delegation| { + let amount_delegated = delegation.lamports_delegated as u128; + let reward_amount = (amount_delegated.checked_mul(remaining_total_rewards)) + .unwrap() + .checked_div(total_delegated) + .unwrap(); + let (claim_status_pubkey, claim_status_bump) = Pubkey::find_program_address( + &[ + ClaimStatus::SEED, + &delegation.stake_account_pubkey.to_bytes(), + &tip_distribution_meta.tip_distribution_pubkey.to_bytes(), + ], + &JitoTipDistribution::id(), + ); + Ok(TreeNode { + claimant: delegation.stake_account_pubkey, + claim_status_pubkey, + claim_status_bump, + staker_pubkey: delegation.staker_pubkey, + withdrawer_pubkey: delegation.withdrawer_pubkey, + amount: reward_amount as u64, + proof: None, + }) + }) + .collect::, MerkleRootGeneratorError>>()?, + ); + + Ok(Some(tree_nodes)) + } else { + Ok(None) + } + } + + fn hash(&self) -> Hash { + let mut hasher = Hasher::default(); + hasher.hash(self.claimant.as_ref()); + hasher.hash(self.amount.to_le_bytes().as_ref()); + hasher.result() + } +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct 
StakeMetaCollection { + /// List of [StakeMeta]. + pub stake_metas: Vec, + + /// base58 encoded tip-distribution program id. + #[serde(with = "pubkey_string_conversion")] + pub tip_distribution_program_id: Pubkey, + + /// Base58 encoded bank hash this object was generated at. + pub bank_hash: String, + + /// Epoch for which this object was generated for. + pub epoch: Epoch, + + /// Slot at which this object was generated. + pub slot: Slot, +} + +#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)] +pub struct StakeMeta { + #[serde(with = "pubkey_string_conversion")] + pub validator_vote_account: Pubkey, + + #[serde(with = "pubkey_string_conversion")] + pub validator_node_pubkey: Pubkey, + + /// The validator's tip-distribution meta if it exists. + pub maybe_tip_distribution_meta: Option, + + /// Delegations to this validator. + pub delegations: Vec, + + /// The total amount of delegations to the validator. + pub total_delegated: u64, + + /// The validator's delegation commission rate as a percentage between 0-100. + pub commission: u8, +} + +impl Ord for StakeMeta { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.validator_vote_account + .cmp(&other.validator_vote_account) + } +} + +impl PartialOrd for StakeMeta { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)] +pub struct TipDistributionMeta { + #[serde(with = "pubkey_string_conversion")] + pub merkle_root_upload_authority: Pubkey, + + #[serde(with = "pubkey_string_conversion")] + pub tip_distribution_pubkey: Pubkey, + + /// The validator's total tips in the [TipDistributionAccount]. + pub total_tips: u64, + + /// The validator's cut of tips from [TipDistributionAccount], calculated from the on-chain + /// commission fee bps. + pub validator_fee_bps: u16, +} + +impl TipDistributionMeta { + fn from_tda_wrapper( + tda_wrapper: TipDistributionAccountWrapper, + // The amount that will be left remaining in the tda to maintain rent exemption status. 
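+        // `total_tips` below is derived as the account's lamports minus this amount.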
+ rent_exempt_amount: u64, + ) -> Result { + Ok(TipDistributionMeta { + tip_distribution_pubkey: tda_wrapper.tip_distribution_pubkey, + total_tips: tda_wrapper + .account_data + .lamports() + .checked_sub(rent_exempt_amount) + .ok_or(CheckedMathError)?, + validator_fee_bps: tda_wrapper + .tip_distribution_account + .validator_commission_bps, + merkle_root_upload_authority: tda_wrapper + .tip_distribution_account + .merkle_root_upload_authority, + }) + } +} + +#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)] +pub struct Delegation { + #[serde(with = "pubkey_string_conversion")] + pub stake_account_pubkey: Pubkey, + + #[serde(with = "pubkey_string_conversion")] + pub staker_pubkey: Pubkey, + + #[serde(with = "pubkey_string_conversion")] + pub withdrawer_pubkey: Pubkey, + + /// Lamports delegated by the stake account + pub lamports_delegated: u64, +} + +impl Ord for Delegation { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + ( + self.stake_account_pubkey, + self.withdrawer_pubkey, + self.staker_pubkey, + self.lamports_delegated, + ) + .cmp(&( + other.stake_account_pubkey, + other.withdrawer_pubkey, + other.staker_pubkey, + other.lamports_delegated, + )) + } +} + +impl PartialOrd for Delegation { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +/// Convenience wrapper around [TipDistributionAccount] +pub struct TipDistributionAccountWrapper { + pub tip_distribution_account: TipDistributionAccount, + pub account_data: AccountSharedData, + pub tip_distribution_pubkey: Pubkey, +} + +// TODO: move to program's sdk +pub fn derive_tip_distribution_account_address( + tip_distribution_program_id: &Pubkey, + vote_pubkey: &Pubkey, + epoch: Epoch, +) -> (Pubkey, u8) { + Pubkey::find_program_address( + &[ + TipDistributionAccount::SEED, + vote_pubkey.to_bytes().as_ref(), + epoch.to_le_bytes().as_ref(), + ], + tip_distribution_program_id, + ) +} + +pub const MAX_RETRIES: usize = 5; +pub const FAIL_DELAY: Duration = Duration::from_millis(100); + +pub async fn sign_and_send_transactions_with_retries( + signer: &Keypair, + rpc_client: &RpcClient, + max_concurrent_rpc_get_reqs: usize, + transactions: Vec, + txn_send_batch_size: usize, + max_loop_duration: Duration, +) -> (Vec, HashMap) { + let semaphore = Arc::new(Semaphore::new(max_concurrent_rpc_get_reqs)); + let mut errors = HashMap::default(); + let mut blockhash = rpc_client + .get_latest_blockhash() + .await + .expect("fetch latest blockhash"); + // track unsigned txns + let mut transactions_to_process = transactions + .into_iter() + .map(|txn| (txn.message_data(), txn)) + .collect::, Transaction>>(); + + let start = Instant::now(); + while start.elapsed() < max_loop_duration && !transactions_to_process.is_empty() { + // ensure we always have a recent blockhash + // blockhashes last max 150 blocks + // finalized commitment is ~32 slots behind tip + // assuming 0% skip rate (every slot has a block), we’d have roughly 120 slots + // or (120*0.4s) = 48s to land a tx before it expires + // if we’re refreshing every 30s, then any txs sent immediately before the refresh would likely expire + if start.elapsed() > Duration::from_secs(1) { + blockhash = rpc_client + .get_latest_blockhash() + .await + .expect("fetch latest blockhash"); + } + info!( + "Sending {txn_send_batch_size} of {} transactions to claim mev tips", + transactions_to_process.len() + ); + let send_futs = transactions_to_process + .iter() + .take(txn_send_batch_size) + .map(|(hash, txn)| { + let semaphore = semaphore.clone(); + async move 
{ + let _permit = semaphore.acquire_owned().await.unwrap(); // wait until our turn + let (txn, res) = signed_send(signer, rpc_client, blockhash, txn.clone()).await; + (hash.clone(), txn, res) + } + }); + + let send_res = futures::future::join_all(send_futs).await; + let new_errors = send_res + .into_iter() + .filter_map(|(hash, txn, result)| match result { + Err(e) => Some((txn.signatures[0], e)), + Ok(..) => { + let _ = transactions_to_process.remove(&hash); + None + } + }) + .collect::>(); + + errors.extend(new_errors); + } + + (transactions_to_process.values().cloned().collect(), errors) +} + +pub async fn send_until_blockhash_expires( + rpc_client: &RpcClient, + transactions: Vec, + blockhash: Hash, + keypair: &Arc, +) -> solana_rpc_client_api::client_error::Result<()> { + let mut claim_transactions: HashMap = transactions + .into_iter() + .map(|mut tx| { + tx.sign(&[&keypair], blockhash); + (*tx.get_signature(), tx) + }) + .collect(); + + let txs_requesting_send = claim_transactions.len(); + + while rpc_client + .is_blockhash_valid(&blockhash, CommitmentConfig::processed()) + .await? + { + let mut check_signatures = HashSet::with_capacity(claim_transactions.len()); + let mut already_processed = HashSet::with_capacity(claim_transactions.len()); + let mut is_blockhash_not_found = false; + + for (signature, tx) in &claim_transactions { + match rpc_client + .send_transaction_with_config( + tx, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(CommitmentLevel::Confirmed), + max_retries: Some(2), + ..RpcSendTransactionConfig::default() + }, + ) + .await + { + Ok(_) => { + check_signatures.insert(*signature); + } + Err(e) => match e.get_transaction_error() { + Some(TransactionError::BlockhashNotFound) => { + is_blockhash_not_found = true; + break; + } + Some(TransactionError::AlreadyProcessed) => { + already_processed.insert(*tx.get_signature()); + } + Some(e) => { + warn!( + "TransactionError sending signature: {} error: {:?} tx: {:?}", + tx.get_signature(), + e, + tx + ); + } + None => { + warn!( + "Unknown error sending transaction signature: {} error: {:?}", + tx.get_signature(), + e + ); + } + }, + } + } + + sleep(Duration::from_secs(10)).await; + + let signatures: Vec = check_signatures.iter().cloned().collect(); + let statuses = get_batched_signatures_statuses(rpc_client, &signatures).await?; + + for (signature, maybe_status) in &statuses { + if let Some(_status) = maybe_status { + claim_transactions.remove(signature); + check_signatures.remove(signature); + } + } + + for signature in already_processed { + claim_transactions.remove(&signature); + } + + if claim_transactions.is_empty() || is_blockhash_not_found { + break; + } + } + + let num_landed = txs_requesting_send + .checked_sub(claim_transactions.len()) + .unwrap(); + info!("num_landed: {:?}", num_landed); + + Ok(()) +} + +pub async fn get_batched_signatures_statuses( + rpc_client: &RpcClient, + signatures: &[Signature], +) -> solana_rpc_client_api::client_error::Result)>> { + let mut signature_statuses = Vec::new(); + + for signatures_batch in signatures.chunks(100) { + // was using get_signature_statuses_with_history, but it blocks if the signatures don't exist + // bigtable calls to read signatures that don't exist block forever w/o --rpc-bigtable-timeout argument set + // get_signature_statuses looks in status_cache, which only has a 150 block history + // may have false negative, but for this workflow it doesn't matter + let statuses = 
rpc_client.get_signature_statuses(signatures_batch).await?; + signature_statuses.extend(signatures_batch.iter().cloned().zip(statuses.value)); + } + Ok(signature_statuses) +} + +/// Just in time sign and send transaction to RPC +async fn signed_send( + signer: &Keypair, + rpc_client: &RpcClient, + blockhash: Hash, + mut txn: Transaction, +) -> (Transaction, solana_rpc_client_api::client_error::Result<()>) { + txn.sign(&[signer], blockhash); // just in time signing + let res = match rpc_client.send_and_confirm_transaction(&txn).await { + Ok(_) => Ok(()), + Err(e) => { + match e.kind { + // Already claimed, skip. + ErrorKind::TransactionError(TransactionError::AlreadyProcessed) + | ErrorKind::TransactionError(TransactionError::InstructionError( + 0, + InstructionError::Custom(0), + )) + | ErrorKind::RpcError(RpcError::RpcResponseError { + data: + RpcResponseErrorData::SendTransactionPreflightFailure( + RpcSimulateTransactionResult { + err: + Some(TransactionError::InstructionError( + 0, + InstructionError::Custom(0), + )), + .. + }, + ), + .. + }) => Ok(()), + + // transaction got held up too long and blockhash expired. retry txn + ErrorKind::TransactionError(TransactionError::BlockhashNotFound) => Err(e), + + // unexpected error, warn and retry + _ => { + error!( + "Error sending transaction. Signature: {}, Error: {e:?}", + txn.signatures[0] + ); + Err(e) + } + } + } + }; + + (txn, res) +} + +async fn get_batched_accounts( + rpc_client: &RpcClient, + pubkeys: &[Pubkey], +) -> solana_rpc_client_api::client_error::Result>> { + let mut batched_accounts = HashMap::new(); + + for pubkeys_chunk in pubkeys.chunks(MAX_MULTIPLE_ACCOUNTS) { + let accounts = rpc_client.get_multiple_accounts(pubkeys_chunk).await?; + batched_accounts.extend(pubkeys_chunk.iter().cloned().zip(accounts)); + } + Ok(batched_accounts) +} + +/// Calculates the minimum balance needed to be rent-exempt +/// taken from: https://github.com/jito-foundation/jito-solana/blob/d1ba42180d0093dd59480a77132477323a8e3f88/sdk/program/src/rent.rs#L78 +pub fn minimum_balance(data_len: usize) -> u64 { + ((((ACCOUNT_STORAGE_OVERHEAD + .checked_add(data_len as u64) + .unwrap()) + .checked_mul(DEFAULT_LAMPORTS_PER_BYTE_YEAR)) + .unwrap() as f64) + * DEFAULT_EXEMPTION_THRESHOLD) as u64 +} + +mod pubkey_string_conversion { + use { + serde::{self, Deserialize, Deserializer, Serializer}, + solana_sdk::pubkey::Pubkey, + std::str::FromStr, + }; + + pub(crate) fn serialize(pubkey: &Pubkey, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&pubkey.to_string()) + } + + pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Pubkey::from_str(&s).map_err(serde::de::Error::custom) + } +} + +pub fn read_json_from_file(path: &PathBuf) -> serde_json::Result +where + T: DeserializeOwned, +{ + let file = File::open(path).unwrap(); + let reader = BufReader::new(file); + serde_json::from_reader(reader) +} + +#[cfg(test)] +mod tests { + use {super::*, jito_tip_distribution::merkle_proof}; + + #[test] + fn test_merkle_tree_verify() { + // Create the merkle tree and proofs + let tda = Pubkey::new_unique(); + let (acct_0, acct_1) = (Pubkey::new_unique(), Pubkey::new_unique()); + let claim_statuses = &[(acct_0, tda), (acct_1, tda)] + .iter() + .map(|(claimant, tda)| { + Pubkey::find_program_address( + &[ClaimStatus::SEED, &claimant.to_bytes(), &tda.to_bytes()], + &JitoTipDistribution::id(), + ) + }) + .collect::>(); + let tree_nodes = vec![ + TreeNode 
{ + claimant: acct_0, + claim_status_pubkey: claim_statuses[0].0, + claim_status_bump: claim_statuses[0].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 151_507, + proof: None, + }, + TreeNode { + claimant: acct_1, + claim_status_pubkey: claim_statuses[1].0, + claim_status_bump: claim_statuses[1].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 176_624, + proof: None, + }, + ]; + + // First the nodes are hashed and merkle tree constructed + let hashed_nodes: Vec<[u8; 32]> = tree_nodes.iter().map(|n| n.hash().to_bytes()).collect(); + let mk = MerkleTree::new(&hashed_nodes[..], true); + let root = mk.get_root().expect("to have valid root").to_bytes(); + + // verify first node + let node = solana_program::hash::hashv(&[&[0u8], &hashed_nodes[0]]); + let proof = get_proof(&mk, 0); + assert!(merkle_proof::verify(proof, root, node.to_bytes())); + + // verify second node + let node = solana_program::hash::hashv(&[&[0u8], &hashed_nodes[1]]); + let proof = get_proof(&mk, 1); + assert!(merkle_proof::verify(proof, root, node.to_bytes())); + } + + #[test] + fn test_new_from_stake_meta_collection_happy_path() { + let merkle_root_upload_authority = Pubkey::new_unique(); + + let (tda_0, tda_1) = (Pubkey::new_unique(), Pubkey::new_unique()); + + let stake_account_0 = Pubkey::new_unique(); + let stake_account_1 = Pubkey::new_unique(); + let stake_account_2 = Pubkey::new_unique(); + let stake_account_3 = Pubkey::new_unique(); + + let staker_account_0 = Pubkey::new_unique(); + let staker_account_1 = Pubkey::new_unique(); + let staker_account_2 = Pubkey::new_unique(); + let staker_account_3 = Pubkey::new_unique(); + + let validator_vote_account_0 = Pubkey::new_unique(); + let validator_vote_account_1 = Pubkey::new_unique(); + + let validator_id_0 = Pubkey::new_unique(); + let validator_id_1 = Pubkey::new_unique(); + + let stake_meta_collection = StakeMetaCollection { + stake_metas: vec![ + StakeMeta { + validator_vote_account: validator_vote_account_0, + validator_node_pubkey: validator_id_0, + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_0, + total_tips: 1_900_122_111_000, + validator_fee_bps: 100, + }), + delegations: vec![ + Delegation { + stake_account_pubkey: stake_account_0, + staker_pubkey: staker_account_0, + withdrawer_pubkey: staker_account_0, + lamports_delegated: 123_999_123_555, + }, + Delegation { + stake_account_pubkey: stake_account_1, + staker_pubkey: staker_account_1, + withdrawer_pubkey: staker_account_1, + lamports_delegated: 144_555_444_556, + }, + ], + total_delegated: 1_555_123_000_333_454_000, + commission: 100, + }, + StakeMeta { + validator_vote_account: validator_vote_account_1, + validator_node_pubkey: validator_id_1, + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_1, + total_tips: 1_900_122_111_333, + validator_fee_bps: 200, + }), + delegations: vec![ + Delegation { + stake_account_pubkey: stake_account_2, + staker_pubkey: staker_account_2, + withdrawer_pubkey: staker_account_2, + lamports_delegated: 224_555_444, + }, + Delegation { + stake_account_pubkey: stake_account_3, + staker_pubkey: staker_account_3, + withdrawer_pubkey: staker_account_3, + lamports_delegated: 700_888_944_555, + }, + ], + total_delegated: 2_565_318_909_444_123, + commission: 10, + }, + ], + tip_distribution_program_id: Pubkey::new_unique(), + bank_hash: Hash::new_unique().to_string(), 
+ epoch: 100, + slot: 2_000_000, + }; + + let merkle_tree_collection = GeneratedMerkleTreeCollection::new_from_stake_meta_collection( + stake_meta_collection.clone(), + None, + ) + .unwrap(); + + assert_eq!(stake_meta_collection.epoch, merkle_tree_collection.epoch); + assert_eq!( + stake_meta_collection.bank_hash, + merkle_tree_collection.bank_hash + ); + assert_eq!(stake_meta_collection.slot, merkle_tree_collection.slot); + assert_eq!( + stake_meta_collection.stake_metas.len(), + merkle_tree_collection.generated_merkle_trees.len() + ); + let claim_statuses = &[ + (validator_vote_account_0, tda_0), + (stake_account_0, tda_0), + (stake_account_1, tda_0), + (validator_vote_account_1, tda_1), + (stake_account_2, tda_1), + (stake_account_3, tda_1), + ] + .iter() + .map(|(claimant, tda)| { + Pubkey::find_program_address( + &[ClaimStatus::SEED, &claimant.to_bytes(), &tda.to_bytes()], + &JitoTipDistribution::id(), + ) + }) + .collect::<Vec<(Pubkey, u8)>>(); + let tree_nodes = vec![ + TreeNode { + claimant: validator_vote_account_0, + claim_status_pubkey: claim_statuses[0].0, + claim_status_bump: claim_statuses[0].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 19_001_221_110, + proof: None, + }, + TreeNode { + claimant: stake_account_0, + claim_status_pubkey: claim_statuses[1].0, + claim_status_bump: claim_statuses[1].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 149_992, + proof: None, + }, + TreeNode { + claimant: stake_account_1, + claim_status_pubkey: claim_statuses[2].0, + claim_status_bump: claim_statuses[2].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 174_858, + proof: None, + }, + ]; + let hashed_nodes: Vec<[u8; 32]> = tree_nodes.iter().map(|n| n.hash().to_bytes()).collect(); + let merkle_tree = MerkleTree::new(&hashed_nodes[..], true); + let gmt_0 = GeneratedMerkleTree { + tip_distribution_account: tda_0, + merkle_root_upload_authority, + merkle_root: *merkle_tree.get_root().unwrap(), + tree_nodes, + max_total_claim: stake_meta_collection.stake_metas[0] + .clone() + .maybe_tip_distribution_meta + .unwrap() + .total_tips, + max_num_nodes: 3, + }; + + let tree_nodes = vec![ + TreeNode { + claimant: validator_vote_account_1, + claim_status_pubkey: claim_statuses[3].0, + claim_status_bump: claim_statuses[3].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 38_002_442_226, + proof: None, + }, + TreeNode { + claimant: stake_account_2, + claim_status_pubkey: claim_statuses[4].0, + claim_status_bump: claim_statuses[4].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 163_000, + proof: None, + }, + TreeNode { + claimant: stake_account_3, + claim_status_pubkey: claim_statuses[5].0, + claim_status_bump: claim_statuses[5].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 508_762_900, + proof: None, + }, + ]; + let hashed_nodes: Vec<[u8; 32]> = tree_nodes.iter().map(|n| n.hash().to_bytes()).collect(); + let merkle_tree = MerkleTree::new(&hashed_nodes[..], true); + let gmt_1 = GeneratedMerkleTree { + tip_distribution_account: tda_1, + merkle_root_upload_authority, + merkle_root: *merkle_tree.get_root().unwrap(), + tree_nodes, + max_total_claim: stake_meta_collection.stake_metas[1] + .clone() + .maybe_tip_distribution_meta + .unwrap() + .total_tips, + max_num_nodes: 3, + }; + + let expected_generated_merkle_trees = vec![gmt_0, gmt_1]; + let actual_generated_merkle_trees =
merkle_tree_collection.generated_merkle_trees; + + expected_generated_merkle_trees + .iter() + .for_each(|expected_gmt| { + let actual_gmt = actual_generated_merkle_trees + .iter() + .find(|gmt| { + gmt.tip_distribution_account == expected_gmt.tip_distribution_account + }) + .unwrap(); + + assert_eq!(expected_gmt.max_num_nodes, actual_gmt.max_num_nodes); + assert_eq!(expected_gmt.max_total_claim, actual_gmt.max_total_claim); + assert_eq!( + expected_gmt.tip_distribution_account, + actual_gmt.tip_distribution_account + ); + assert_eq!(expected_gmt.tree_nodes.len(), actual_gmt.tree_nodes.len()); + expected_gmt + .tree_nodes + .iter() + .for_each(|expected_tree_node| { + let actual_tree_node = actual_gmt + .tree_nodes + .iter() + .find(|tree_node| tree_node.claimant == expected_tree_node.claimant) + .unwrap(); + assert_eq!(expected_tree_node.amount, actual_tree_node.amount); + }); + assert_eq!(expected_gmt.merkle_root, actual_gmt.merkle_root); + }); + } +} diff --git a/tip-distributor/src/merkle_root_generator_workflow.rs b/tip-distributor/src/merkle_root_generator_workflow.rs new file mode 100644 index 0000000000..bee3da016b --- /dev/null +++ b/tip-distributor/src/merkle_root_generator_workflow.rs @@ -0,0 +1,54 @@ +use { + crate::{read_json_from_file, GeneratedMerkleTreeCollection, StakeMetaCollection}, + log::*, + solana_client::rpc_client::RpcClient, + std::{ + fmt::Debug, + fs::File, + io::{BufWriter, Write}, + path::PathBuf, + }, + thiserror::Error, +}; + +#[derive(Error, Debug)] +pub enum MerkleRootGeneratorError { + #[error(transparent)] + IoError(#[from] std::io::Error), + + #[error(transparent)] + RpcError(#[from] Box<solana_client::client_error::ClientError>), + + #[error(transparent)] + SerdeJsonError(#[from] serde_json::Error), +} + +pub fn generate_merkle_root( + stake_meta_coll_path: &PathBuf, + out_path: &PathBuf, + rpc_url: &str, +) -> Result<(), MerkleRootGeneratorError> { + let stake_meta_coll: StakeMetaCollection = read_json_from_file(stake_meta_coll_path)?; + + let rpc_client = RpcClient::new(rpc_url); + let merkle_tree_coll = GeneratedMerkleTreeCollection::new_from_stake_meta_collection( + stake_meta_coll, + Some(rpc_client), + )?; + + write_to_json_file(&merkle_tree_coll, out_path)?; + Ok(()) +} + +fn write_to_json_file( + merkle_tree_coll: &GeneratedMerkleTreeCollection, + file_path: &PathBuf, +) -> Result<(), MerkleRootGeneratorError> { + let file = File::create(file_path)?; + let mut writer = BufWriter::new(file); + let json = serde_json::to_string_pretty(&merkle_tree_coll).unwrap(); + writer.write_all(json.as_bytes())?; + writer.flush()?; + + Ok(()) +}
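For orientation: generate_merkle_root above consumes the StakeMetaCollection JSON emitted by the stake-meta workflow and produces the GeneratedMerkleTreeCollection JSON that the upload workflow below reads. A minimal driver sketch (paths and RPC URL are illustrative, not taken from this patch): generate_merkle_root(&PathBuf::from("stake-meta.json"), &PathBuf::from("merkle-tree.json"), "http://localhost:8899")?;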
diff --git a/tip-distributor/src/merkle_root_upload_workflow.rs b/tip-distributor/src/merkle_root_upload_workflow.rs new file mode 100644 index 0000000000..e40465581f --- /dev/null +++ b/tip-distributor/src/merkle_root_upload_workflow.rs @@ -0,0 +1,138 @@ +use { + crate::{ + read_json_from_file, sign_and_send_transactions_with_retries, GeneratedMerkleTree, + GeneratedMerkleTreeCollection, + }, + anchor_lang::AccountDeserialize, + jito_tip_distribution::{ + sdk::instruction::{upload_merkle_root_ix, UploadMerkleRootAccounts, UploadMerkleRootArgs}, + state::{Config, TipDistributionAccount}, + }, + log::{error, info}, + solana_client::nonblocking::rpc_client::RpcClient, + solana_program::{ + fee_calculator::DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE, native_token::LAMPORTS_PER_SOL, + }, + solana_sdk::{ + commitment_config::CommitmentConfig, + pubkey::Pubkey, + signature::{read_keypair_file, Signer}, + transaction::Transaction, + }, + std::{path::PathBuf, time::Duration}, + thiserror::Error, + tokio::runtime::Builder, +}; + +#[derive(Error, Debug)] +pub enum MerkleRootUploadError { + #[error(transparent)] + IoError(#[from] std::io::Error), + + #[error(transparent)] + JsonError(#[from] serde_json::Error), +} + +pub fn upload_merkle_root( + merkle_root_path: &PathBuf, + keypair_path: &PathBuf, + rpc_url: &str, + tip_distribution_program_id: &Pubkey, + max_concurrent_rpc_get_reqs: usize, + txn_send_batch_size: usize, +) -> Result<(), MerkleRootUploadError> { + const MAX_RETRY_DURATION: Duration = Duration::from_secs(600); + + let merkle_tree: GeneratedMerkleTreeCollection = + read_json_from_file(merkle_root_path).expect("read GeneratedMerkleTreeCollection"); + let keypair = read_keypair_file(keypair_path).expect("read keypair file"); + + let tip_distribution_config = + Pubkey::find_program_address(&[Config::SEED], tip_distribution_program_id).0; + + let runtime = Builder::new_multi_thread() + .worker_threads(16) + .enable_all() + .build() + .expect("build runtime"); + + runtime.block_on(async move { + let rpc_client = + RpcClient::new_with_commitment(rpc_url.to_string(), CommitmentConfig::confirmed()); + let trees: Vec<GeneratedMerkleTree> = merkle_tree + .generated_merkle_trees + .into_iter() + .filter(|tree| tree.merkle_root_upload_authority == keypair.pubkey()) + .collect(); + + info!("num trees to upload: {:?}", trees.len()); + + // heuristic to make sure we have enough funds to cover execution, assumes all trees need updating + { + let initial_balance = rpc_client.get_balance(&keypair.pubkey()).await.expect("failed to get balance"); + let desired_balance = (trees.len() as u64).checked_mul(DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE).unwrap(); + if initial_balance < desired_balance { + let sol_to_deposit = desired_balance.checked_sub(initial_balance).unwrap().checked_add(LAMPORTS_PER_SOL).unwrap().checked_sub(1).unwrap().checked_div(LAMPORTS_PER_SOL).unwrap(); // rounds up to nearest sol + panic!("Expected to have at least {} lamports in {}, current balance is {} lamports, deposit {} SOL to continue.", + desired_balance, &keypair.pubkey(), initial_balance, sol_to_deposit) + } + } + let mut trees_needing_update: Vec<GeneratedMerkleTree> = vec![]; + for tree in trees { + let account = rpc_client + .get_account(&tree.tip_distribution_account) + .await + .expect("fetch expect"); + + let mut data = account.data.as_slice(); + let fetched_tip_distribution_account = + TipDistributionAccount::try_deserialize(&mut data) + .expect("failed to deserialize tip_distribution_account state"); + + let needs_upload = match fetched_tip_distribution_account.merkle_root { + Some(merkle_root) => { + merkle_root.total_funds_claimed == 0 + && merkle_root.root != tree.merkle_root.to_bytes() + } + None => true, + }; + + if needs_upload { + trees_needing_update.push(tree); + } + } + + info!("num trees need uploading: {:?}", trees_needing_update.len()); + + let transactions: Vec<Transaction> = trees_needing_update + .iter() + .map(|tree| { + let ix = upload_merkle_root_ix( + *tip_distribution_program_id, + UploadMerkleRootArgs { + root: tree.merkle_root.to_bytes(), + max_total_claim: tree.max_total_claim, + max_num_nodes: tree.max_num_nodes, + }, + UploadMerkleRootAccounts { + config: tip_distribution_config, + merkle_root_upload_authority: keypair.pubkey(), + tip_distribution_account: tree.tip_distribution_account, + }, + ); + Transaction::new_with_payer( + &[ix], + Some(&keypair.pubkey()), + ) + }) + .collect();
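// The funding heuristic above rounds the lamport shortfall up to whole SOL with integer ceiling division, (desired - initial + LAMPORTS_PER_SOL - 1) / LAMPORTS_PER_SOL; with illustrative numbers, a shortfall of 1 lamport requests a 1 SOL deposit and a shortfall of LAMPORTS_PER_SOL + 1 requests 2 SOL.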
+ let (to_process, failed_transactions) = sign_and_send_transactions_with_retries( + &keypair, &rpc_client, max_concurrent_rpc_get_reqs, transactions, txn_send_batch_size, MAX_RETRY_DURATION).await; + if !to_process.is_empty() { + panic!("{} remaining mev claim transactions, {} failed requests.", to_process.len(), failed_transactions.len()); + } + }); + + Ok(()) +} diff --git a/tip-distributor/src/reclaim_rent_workflow.rs b/tip-distributor/src/reclaim_rent_workflow.rs new file mode 100644 index 0000000000..fc48c89d61 --- /dev/null +++ b/tip-distributor/src/reclaim_rent_workflow.rs @@ -0,0 +1,310 @@ +use { + crate::{ + claim_mev_workflow::ClaimMevError, get_batched_accounts, + reclaim_rent_workflow::ClaimMevError::AnchorError, send_until_blockhash_expires, + }, + anchor_lang::AccountDeserialize, + jito_tip_distribution::{ + sdk::{ + derive_config_account_address, + instruction::{ + close_claim_status_ix, close_tip_distribution_account_ix, CloseClaimStatusAccounts, + CloseClaimStatusArgs, CloseTipDistributionAccountArgs, + CloseTipDistributionAccounts, + }, + }, + state::{ClaimStatus, Config, TipDistributionAccount}, + }, + log::{info, warn}, + rand::{prelude::SliceRandom, thread_rng}, + solana_client::nonblocking::rpc_client::RpcClient, + solana_metrics::datapoint_info, + solana_program::{clock::Epoch, pubkey::Pubkey}, + solana_rpc_client_api::config::RpcSimulateTransactionConfig, + solana_sdk::{ + account::Account, + commitment_config::CommitmentConfig, + compute_budget::ComputeBudgetInstruction, + signature::{Keypair, Signer}, + transaction::Transaction, + }, + std::{ + sync::Arc, + time::{Duration, Instant}, + }, +}; + +/// Clear old ClaimStatus accounts +pub async fn reclaim_rent( + rpc_url: String, + tip_distribution_program_id: Pubkey, + signer: Arc<Keypair>, + max_loop_duration: Duration, + // Optionally reclaim TipDistributionAccount rents on behalf of validators.
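// (When enabled, TipDistributionAccounts whose expires_at epoch has passed are closed as well, with their rent flowing to the config's expired_funds_account; see build_close_tda_transactions below.)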
+ should_reclaim_tdas: bool, + micro_lamports: u64, +) -> Result<(), ClaimMevError> { + let rpc_client = RpcClient::new_with_timeout_and_commitment( + rpc_url.clone(), + Duration::from_secs(300), + CommitmentConfig::processed(), + ); + + let start = Instant::now(); + + let accounts = rpc_client + .get_program_accounts(&tip_distribution_program_id) + .await?; + + let config_pubkey = derive_config_account_address(&tip_distribution_program_id).0; + let config_account = rpc_client.get_account(&config_pubkey).await?; + let config_account = + Config::try_deserialize(&mut config_account.data.as_slice()).map_err(AnchorError)?; + + let epoch = rpc_client.get_epoch_info().await?.epoch; + let mut claim_status_pubkeys_to_expire = + find_expired_claim_status_accounts(&accounts, epoch, signer.pubkey()); + let mut tda_pubkeys_to_expire = find_expired_tda_accounts(&accounts, epoch); + + while start.elapsed() <= max_loop_duration { + let mut transactions = build_close_claim_status_transactions( + &claim_status_pubkeys_to_expire, + tip_distribution_program_id, + config_pubkey, + micro_lamports, + signer.pubkey(), + ); + if should_reclaim_tdas { + transactions.extend(build_close_tda_transactions( + &tda_pubkeys_to_expire, + tip_distribution_program_id, + config_pubkey, + &config_account, + signer.pubkey(), + )); + } + + datapoint_info!( + "claim_mev_workflow-prepare_rent_reclaim_transactions", + ("transaction_count", transactions.len(), i64), + ); + + if transactions.is_empty() { + info!("Finished reclaim rent!"); + return Ok(()); + } + + transactions.shuffle(&mut thread_rng()); + let transactions: Vec<_> = transactions.into_iter().take(10_000).collect(); + let blockhash = rpc_client.get_latest_blockhash().await?; + send_until_blockhash_expires(&rpc_client, transactions, blockhash, &signer).await?; + + // can just refresh calling get_multiple_accounts since these operations should be subtractive and not additive + let claim_status_pubkeys: Vec<_> = claim_status_pubkeys_to_expire + .iter() + .map(|(pubkey, _)| *pubkey) + .collect(); + claim_status_pubkeys_to_expire = get_batched_accounts(&rpc_client, &claim_status_pubkeys) + .await? + .into_iter() + .filter_map(|(pubkey, account)| Some((pubkey, account?))) + .collect(); + + let tda_pubkeys: Vec<_> = tda_pubkeys_to_expire + .iter() + .map(|(pubkey, _)| *pubkey) + .collect(); + tda_pubkeys_to_expire = get_batched_accounts(&rpc_client, &tda_pubkeys) + .await? + .into_iter() + .filter_map(|(pubkey, account)| Some((pubkey, account?))) + .collect(); + } + + // one final refresh before double checking everything + let claim_status_pubkeys: Vec<_> = claim_status_pubkeys_to_expire + .iter() + .map(|(pubkey, _)| *pubkey) + .collect(); + claim_status_pubkeys_to_expire = get_batched_accounts(&rpc_client, &claim_status_pubkeys) + .await? + .into_iter() + .filter_map(|(pubkey, account)| Some((pubkey, account?))) + .collect(); + + let tda_pubkeys: Vec<_> = tda_pubkeys_to_expire + .iter() + .map(|(pubkey, _)| *pubkey) + .collect(); + tda_pubkeys_to_expire = get_batched_accounts(&rpc_client, &tda_pubkeys) + .await? 
+ .into_iter() + .filter_map(|(pubkey, account)| Some((pubkey, account?))) + .collect(); + + let mut transactions = build_close_claim_status_transactions( + &claim_status_pubkeys_to_expire, + tip_distribution_program_id, + config_pubkey, + micro_lamports, + signer.pubkey(), + ); + if should_reclaim_tdas { + transactions.extend(build_close_tda_transactions( + &tda_pubkeys_to_expire, + tip_distribution_program_id, + config_pubkey, + &config_account, + signer.pubkey(), + )); + } + + if transactions.is_empty() { + return Ok(()); + } + + // if there are transactions left, simulate them all to make sure it's not an uncaught error + let mut is_error = false; + let mut error_str = String::new(); + for tx in &transactions { + match rpc_client + .simulate_transaction_with_config( + tx, + RpcSimulateTransactionConfig { + sig_verify: false, + replace_recent_blockhash: true, + commitment: Some(CommitmentConfig::processed()), + ..RpcSimulateTransactionConfig::default() + }, + ) + .await + { + Ok(_) => {} + Err(e) => { + error_str = e.to_string(); + is_error = true; + + match e.get_transaction_error() { + None => { + break; + } + Some(e) => { + warn!("transaction error. tx: {:?} error: {:?}", tx, e); + break; + } + } + } + } + } + + if is_error { + Err(ClaimMevError::UncaughtError { e: error_str }) + } else { + Err(ClaimMevError::NotFinished { + transactions_left: transactions.len(), + }) + } +} + +fn find_expired_claim_status_accounts( + accounts: &[(Pubkey, Account)], + epoch: Epoch, + payer: Pubkey, +) -> Vec<(Pubkey, Account)> { + accounts + .iter() + .filter_map(|(pubkey, account)| { + let claim_status = ClaimStatus::try_deserialize(&mut account.data.as_slice()).ok()?; + if claim_status.claim_status_payer.eq(&payer) && epoch > claim_status.expires_at { + Some((*pubkey, account.clone())) + } else { + None + } + }) + .collect() +} + +fn find_expired_tda_accounts( + accounts: &[(Pubkey, Account)], + epoch: Epoch, +) -> Vec<(Pubkey, Account)> { + accounts + .iter() + .filter_map(|(pubkey, account)| { + let tda = TipDistributionAccount::try_deserialize(&mut account.data.as_slice()).ok()?; + if epoch > tda.expires_at { + Some((*pubkey, account.clone())) + } else { + None + } + }) + .collect() +} + +/// Assumes accounts is already pre-filtered with checks to ensure the account can be closed +fn build_close_claim_status_transactions( + accounts: &[(Pubkey, Account)], + tip_distribution_program_id: Pubkey, + config: Pubkey, + microlamports: u64, + payer: Pubkey, +) -> Vec<Transaction> { + accounts + .iter() + .map(|(claim_status_pubkey, account)| { + let claim_status = ClaimStatus::try_deserialize(&mut account.data.as_slice()).unwrap(); + close_claim_status_ix( + tip_distribution_program_id, + CloseClaimStatusArgs, + CloseClaimStatusAccounts { + config, + claim_status: *claim_status_pubkey, + claim_status_payer: claim_status.claim_status_payer, + }, + ) + }) + .collect::<Vec<_>>() + .chunks(4) + .map(|close_claim_status_instructions| { + let mut instructions = vec![ComputeBudgetInstruction::set_compute_unit_price( + microlamports, + )]; + instructions.extend(close_claim_status_instructions.to_vec()); + Transaction::new_with_payer(&instructions, Some(&payer)) + }) + .collect() +}
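// Sizing note: close instructions are packed four to a transaction, so n expired accounts yield ceil(n / 4) transactions, and each ClaimStatus batch additionally leads with the ComputeBudgetInstruction::set_compute_unit_price(microlamports) instruction built above.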
+fn build_close_tda_transactions( + accounts: &[(Pubkey, Account)], + tip_distribution_program_id: Pubkey, + config_pubkey: Pubkey, + config: &Config, + payer: Pubkey, +) -> Vec<Transaction> { + let instructions: Vec<_> = accounts + .iter() + .map(|(pubkey, account)| { + let tda = + TipDistributionAccount::try_deserialize(&mut account.data.as_slice()).unwrap(); + close_tip_distribution_account_ix( + tip_distribution_program_id, + CloseTipDistributionAccountArgs { + _epoch: tda.epoch_created_at, + }, + CloseTipDistributionAccounts { + config: config_pubkey, + tip_distribution_account: *pubkey, + validator_vote_account: tda.validator_vote_account, + expired_funds_account: config.expired_funds_account, + signer: payer, + }, + ) + }) + .collect(); + + instructions + .chunks(4) + .map(|ix_chunk| Transaction::new_with_payer(ix_chunk, Some(&payer))) + .collect() +} diff --git a/tip-distributor/src/stake_meta_generator_workflow.rs b/tip-distributor/src/stake_meta_generator_workflow.rs new file mode 100644 index 0000000000..bbbdfbd067 --- /dev/null +++ b/tip-distributor/src/stake_meta_generator_workflow.rs @@ -0,0 +1,966 @@ +use { + crate::{ + derive_tip_distribution_account_address, derive_tip_payment_pubkeys, Config, StakeMeta, + StakeMetaCollection, TipDistributionAccount, TipDistributionAccountWrapper, + TipDistributionMeta, + }, + anchor_lang::AccountDeserialize, + itertools::Itertools, + log::*, + solana_accounts_db::hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + solana_client::client_error::ClientError, + solana_ledger::{ + bank_forks_utils, + blockstore::{Blockstore, BlockstoreError}, + blockstore_options::{AccessType, BlockstoreOptions, LedgerColumnOptions}, + blockstore_processor::{BlockstoreProcessorError, ProcessOptions}, + }, + solana_runtime::{bank::Bank, snapshot_config::SnapshotConfig, stakes::StakeAccount}, + solana_sdk::{ + account::{ReadableAccount, WritableAccount}, + clock::Slot, + pubkey::Pubkey, + }, + solana_vote::vote_account::VoteAccount, + std::{ + collections::HashMap, + fmt::{Debug, Display, Formatter}, + fs::File, + io::{BufWriter, Write}, + mem::size_of, + path::{Path, PathBuf}, + sync::{atomic::AtomicBool, Arc}, + }, + thiserror::Error, +}; + +#[derive(Error, Debug)] +pub enum StakeMetaGeneratorError { + #[error(transparent)] + AnchorError(#[from] Box<anchor_lang::error::Error>), + + #[error(transparent)] + BlockstoreError(#[from] BlockstoreError), + + #[error(transparent)] + BlockstoreProcessorError(#[from] BlockstoreProcessorError), + + #[error(transparent)] + IoError(#[from] std::io::Error), + + CheckedMathError, + + #[error(transparent)] + RpcError(#[from] ClientError), + + #[error(transparent)] + SerdeJsonError(#[from] serde_json::Error), + + SnapshotSlotNotFound, +} + +impl Display for StakeMetaGeneratorError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Debug::fmt(&self, f) + } +} + +/// Runs the entire workflow of creating a bank from a snapshot to writing stake meta-data +/// to a JSON file.
+pub fn generate_stake_meta( + ledger_path: &Path, + snapshot_slot: &Slot, + tip_distribution_program_id: &Pubkey, + out_path: &str, + tip_payment_program_id: &Pubkey, +) -> Result<(), StakeMetaGeneratorError> { + info!("Creating bank from ledger path..."); + let bank = create_bank_from_snapshot(ledger_path, snapshot_slot)?; + + info!("Generating stake_meta_collection object..."); + let stake_meta_coll = + generate_stake_meta_collection(&bank, tip_distribution_program_id, tip_payment_program_id)?; + + info!("Writing stake_meta_collection to JSON {}...", out_path); + write_to_json_file(&stake_meta_coll, out_path)?; + + Ok(()) +} + +fn create_bank_from_snapshot( + ledger_path: &Path, + snapshot_slot: &Slot, +) -> Result<Arc<Bank>, StakeMetaGeneratorError> { + let genesis_config = open_genesis_config(ledger_path, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE); + let snapshot_config = SnapshotConfig { + full_snapshot_archive_interval_slots: Slot::MAX, + incremental_snapshot_archive_interval_slots: Slot::MAX, + full_snapshot_archives_dir: PathBuf::from(ledger_path), + incremental_snapshot_archives_dir: PathBuf::from(ledger_path), + bank_snapshots_dir: PathBuf::from(ledger_path), + ..SnapshotConfig::default() + }; + let blockstore = Blockstore::open_with_options( + ledger_path, + BlockstoreOptions { + access_type: AccessType::PrimaryForMaintenance, + recovery_mode: None, + enforce_ulimit_nofile: false, + column_options: LedgerColumnOptions::default(), + }, + )?; + let (bank_forks, _, _) = bank_forks_utils::load_bank_forks( + &genesis_config, + &blockstore, + vec![PathBuf::from(ledger_path).join(Path::new("stake-meta.accounts"))], + None, + Some(&snapshot_config), + &ProcessOptions::default(), + None, + None, + None, + Arc::new(AtomicBool::new(false)), + false, + ); + + let working_bank = bank_forks.read().unwrap().working_bank(); + assert_eq!( + working_bank.slot(), + *snapshot_slot, + "expected working bank slot {}, found {}", + snapshot_slot, + working_bank.slot() + ); + + Ok(working_bank) +} + +fn write_to_json_file( + stake_meta_coll: &StakeMetaCollection, + out_path: &str, +) -> Result<(), StakeMetaGeneratorError> { + let file = File::create(out_path)?; + let mut writer = BufWriter::new(file); + let json = serde_json::to_string_pretty(&stake_meta_coll).unwrap(); + writer.write_all(json.as_bytes())?; + writer.flush()?; + + Ok(()) +} + +/// Creates a collection of [StakeMeta]'s from the given bank. +pub fn generate_stake_meta_collection( + bank: &Arc<Bank>, + tip_distribution_program_id: &Pubkey, + tip_payment_program_id: &Pubkey, +) -> Result<StakeMetaCollection, StakeMetaGeneratorError> { + assert!(bank.is_frozen()); + + let epoch_vote_accounts = bank.epoch_vote_accounts(bank.epoch()).unwrap_or_else(|| { + panic!( + "No epoch_vote_accounts found for slot {} at epoch {}", + bank.slot(), + bank.epoch() + ) + }); + + let l_stakes = bank.stakes_cache.stakes(); + let delegations = l_stakes.stake_delegations(); + + let voter_pubkey_to_delegations = group_delegations_by_voter_pubkey(delegations, bank); + + // the last leader in an epoch may not crank the tip program before the epoch is over, which + // would result in MEV rewards for epoch N not being cranked until epoch N + 1. This means that + // the account balance in the snapshot could be incorrect. + // We assume that the rewards sitting in the tip program PDAs are cranked out by the time all of + // the rewards are claimed.
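// Worked example for the split computed below (illustrative numbers): with 1_000 lamports of excess across the tip PDAs and block_builder_commission_pct = 10, the block builder keeps 1_000 * 10 / 100 = 100 lamports and the tip receiver is credited the remaining 900, mirroring the tip payment program's math.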
+ let tip_accounts = derive_tip_payment_pubkeys(tip_payment_program_id); + let account = bank + .get_account(&tip_accounts.config_pda) + .expect("config pda exists"); + + let config = Config::try_deserialize(&mut account.data()).expect("deserializes configuration"); + + let bb_commission_pct: u64 = config.block_builder_commission_pct; + let tip_receiver: Pubkey = config.tip_receiver; + + // includes the block builder fee + let excess_tip_balances: u64 = tip_accounts + .tip_pdas + .iter() + .map(|pubkey| { + let tip_account = bank.get_account(pubkey).expect("tip account exists"); + tip_account + .lamports() + .checked_sub(bank.get_minimum_balance_for_rent_exemption(tip_account.data().len())) + .expect("tip balance underflow") + }) + .sum(); + // matches math in tip payment program + let block_builder_tips = excess_tip_balances + .checked_mul(bb_commission_pct) + .expect("block_builder_tips overflow") + .checked_div(100) + .expect("block_builder_tips division error"); + let tip_receiver_fee = excess_tip_balances + .checked_sub(block_builder_tips) + .expect("tip_receiver_fee doesnt underflow"); + + let vote_pk_and_maybe_tdas: Vec<( + (Pubkey, &VoteAccount), + Option<TipDistributionAccountWrapper>, + )> = epoch_vote_accounts + .iter() + .map(|(vote_pubkey, (_total_stake, vote_account))| { + let tip_distribution_pubkey = derive_tip_distribution_account_address( + tip_distribution_program_id, + vote_pubkey, + bank.epoch(), + ) + .0; + let tda = if let Some(mut account_data) = bank.get_account(&tip_distribution_pubkey) { + // TDAs may be funded with lamports and therefore exist in the bank, but would fail the deserialization step + // if the buffer is yet to be allocated thru the init call to the program. + if let Ok(tip_distribution_account) = + TipDistributionAccount::try_deserialize(&mut account_data.data()) + { + // this snapshot might have tips that weren't claimed by the time the epoch is over + // assume that it will eventually be cranked and credit the excess to this account + if tip_distribution_pubkey == tip_receiver { + account_data.set_lamports( + account_data + .lamports() + .checked_add(tip_receiver_fee) + .expect("tip overflow"), + ); + } + Some(TipDistributionAccountWrapper { + tip_distribution_account, + account_data, + tip_distribution_pubkey, + }) + } else { + None + } + } else { + None + }; + Ok(((*vote_pubkey, vote_account), tda)) + }) + .collect::<Result<Vec<_>, StakeMetaGeneratorError>>()?; + + let mut stake_metas = vec![]; + for ((vote_pubkey, vote_account), maybe_tda) in vote_pk_and_maybe_tdas { + if let Some(mut delegations) = voter_pubkey_to_delegations.get(&vote_pubkey).cloned() { + let total_delegated = delegations.iter().fold(0u64, |sum, delegation| { + sum.checked_add(delegation.lamports_delegated).unwrap() + }); + + let maybe_tip_distribution_meta = if let Some(tda) = maybe_tda { + let actual_len = tda.account_data.data().len(); + let expected_len = 8_usize.saturating_add(size_of::<TipDistributionAccount>()); + if actual_len != expected_len { + warn!("len mismatch actual={actual_len}, expected={expected_len}"); + } + let rent_exempt_amount = + bank.get_minimum_balance_for_rent_exemption(tda.account_data.data().len()); + + Some(TipDistributionMeta::from_tda_wrapper( + tda, + rent_exempt_amount, + )?)
+ } else { + None + }; + + let vote_state = vote_account.vote_state().unwrap(); + delegations.sort(); + stake_metas.push(StakeMeta { + maybe_tip_distribution_meta, + validator_node_pubkey: vote_state.node_pubkey, + validator_vote_account: vote_pubkey, + delegations, + total_delegated, + commission: vote_state.commission, + }); + } else { + warn!( + "voter_pubkey not found in voter_pubkey_to_delegations map [validator_vote_pubkey={}]", + vote_pubkey + ); + } + } + stake_metas.sort(); + + Ok(StakeMetaCollection { + stake_metas, + tip_distribution_program_id: *tip_distribution_program_id, + bank_hash: bank.hash().to_string(), + epoch: bank.epoch(), + slot: bank.slot(), + }) +} + +/// Given an [EpochStakes] object, return delegations grouped by voter_pubkey (validator delegated to). +fn group_delegations_by_voter_pubkey( + delegations: &im::HashMap<Pubkey, StakeAccount>, + bank: &Bank, +) -> HashMap<Pubkey, Vec<crate::Delegation>> { + delegations + .into_iter() + .filter(|(_stake_pubkey, stake_account)| { + stake_account.delegation().stake( + bank.epoch(), + None, + bank.new_warmup_cooldown_rate_epoch(), + ) > 0 + }) + .into_group_map_by(|(_stake_pubkey, stake_account)| stake_account.delegation().voter_pubkey) + .into_iter() + .map(|(voter_pubkey, group)| { + ( + voter_pubkey, + group + .into_iter() + .map(|(stake_pubkey, stake_account)| crate::Delegation { + stake_account_pubkey: *stake_pubkey, + staker_pubkey: stake_account + .stake_state() + .authorized() + .map(|a| a.staker) + .unwrap_or_default(), + withdrawer_pubkey: stake_account + .stake_state() + .authorized() + .map(|a| a.withdrawer) + .unwrap_or_default(), + lamports_delegated: stake_account.delegation().stake, + }) + .collect::<Vec<crate::Delegation>>(), + ) + }) + .collect() +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::derive_tip_distribution_account_address, + anchor_lang::AccountSerialize, + jito_tip_distribution::state::TipDistributionAccount, + jito_tip_payment::{ + InitBumps, TipPaymentAccount, CONFIG_ACCOUNT_SEED, TIP_ACCOUNT_SEED_0, + TIP_ACCOUNT_SEED_1, TIP_ACCOUNT_SEED_2, TIP_ACCOUNT_SEED_3, TIP_ACCOUNT_SEED_4, + TIP_ACCOUNT_SEED_5, TIP_ACCOUNT_SEED_6, TIP_ACCOUNT_SEED_7, + }, + solana_runtime::genesis_utils::{ + create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, + }, + solana_sdk::{ + self, + account::{from_account, AccountSharedData}, + message::Message, + signature::{Keypair, Signer}, + stake::{ + self, + state::{Authorized, Lockup}, + }, + stake_history::StakeHistory, + sysvar, + transaction::Transaction, + }, + solana_stake_program::stake_state, + }; + + #[test] + fn test_generate_stake_meta_collection_happy_path() { + /* 1. Create a Bank seeded with some validator stake accounts */ + let validator_keypairs_0 = ValidatorVoteKeypairs::new_rand(); + let validator_keypairs_1 = ValidatorVoteKeypairs::new_rand(); + let validator_keypairs_2 = ValidatorVoteKeypairs::new_rand(); + let validator_keypairs = vec![ + &validator_keypairs_0, + &validator_keypairs_1, + &validator_keypairs_2, + ]; + const INITIAL_VALIDATOR_STAKES: u64 = 10_000; + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![INITIAL_VALIDATOR_STAKES; 3], + ); + + let bank = Bank::new_for_tests(&genesis_config); + + /* 2.
Seed the Bank with [TipDistributionAccount]'s */ + let merkle_root_upload_authority = Pubkey::new_unique(); + let tip_distribution_program_id = Pubkey::new_unique(); + let tip_payment_program_id = Pubkey::new_unique(); + + let delegator_0 = Keypair::new(); + let delegator_1 = Keypair::new(); + let delegator_2 = Keypair::new(); + let delegator_3 = Keypair::new(); + let delegator_4 = Keypair::new(); + + let delegator_0_pk = delegator_0.pubkey(); + let delegator_1_pk = delegator_1.pubkey(); + let delegator_2_pk = delegator_2.pubkey(); + let delegator_3_pk = delegator_3.pubkey(); + let delegator_4_pk = delegator_4.pubkey(); + + let d_0_data = AccountSharedData::new( + 300_000_000_000_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + let d_1_data = AccountSharedData::new( + 100_000_203_000_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + let d_2_data = AccountSharedData::new( + 100_000_235_899_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + let d_3_data = AccountSharedData::new( + 200_000_000_000_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + let d_4_data = AccountSharedData::new( + 100_000_000_777_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + + bank.store_account(&delegator_0_pk, &d_0_data); + bank.store_account(&delegator_1_pk, &d_1_data); + bank.store_account(&delegator_2_pk, &d_2_data); + bank.store_account(&delegator_3_pk, &d_3_data); + bank.store_account(&delegator_4_pk, &d_4_data); + + /* 3. Delegate some stake to the initial set of validators */ + let mut validator_0_delegations = vec![crate::Delegation { + stake_account_pubkey: validator_keypairs_0.stake_keypair.pubkey(), + staker_pubkey: validator_keypairs_0.stake_keypair.pubkey(), + withdrawer_pubkey: validator_keypairs_0.stake_keypair.pubkey(), + lamports_delegated: INITIAL_VALIDATOR_STAKES, + }]; + let stake_account = delegate_stake_helper( + &bank, + &delegator_0, + &validator_keypairs_0.vote_keypair.pubkey(), + 30_000_000_000, + ); + validator_0_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_0.pubkey(), + withdrawer_pubkey: delegator_0.pubkey(), + lamports_delegated: 30_000_000_000, + }); + let stake_account = delegate_stake_helper( + &bank, + &delegator_1, + &validator_keypairs_0.vote_keypair.pubkey(), + 3_000_000_000, + ); + validator_0_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_1.pubkey(), + withdrawer_pubkey: delegator_1.pubkey(), + lamports_delegated: 3_000_000_000, + }); + let stake_account = delegate_stake_helper( + &bank, + &delegator_2, + &validator_keypairs_0.vote_keypair.pubkey(), + 33_000_000_000, + ); + validator_0_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_2.pubkey(), + withdrawer_pubkey: delegator_2.pubkey(), + lamports_delegated: 33_000_000_000, + }); + + let mut validator_1_delegations = vec![crate::Delegation { + stake_account_pubkey: validator_keypairs_1.stake_keypair.pubkey(), + staker_pubkey: validator_keypairs_1.stake_keypair.pubkey(), + withdrawer_pubkey: validator_keypairs_1.stake_keypair.pubkey(), + lamports_delegated: INITIAL_VALIDATOR_STAKES, + }]; + let stake_account = delegate_stake_helper( + &bank, + &delegator_3, + &validator_keypairs_1.vote_keypair.pubkey(), + 4_222_364_000, + ); + validator_1_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_3.pubkey(), + withdrawer_pubkey: delegator_3.pubkey(), + lamports_delegated: 
4_222_364_000, + }); + let stake_account = delegate_stake_helper( + &bank, + &delegator_4, + &validator_keypairs_1.vote_keypair.pubkey(), + 6_000_000_527, + ); + validator_1_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_4.pubkey(), + withdrawer_pubkey: delegator_4.pubkey(), + lamports_delegated: 6_000_000_527, + }); + + let mut validator_2_delegations = vec![crate::Delegation { + stake_account_pubkey: validator_keypairs_2.stake_keypair.pubkey(), + staker_pubkey: validator_keypairs_2.stake_keypair.pubkey(), + withdrawer_pubkey: validator_keypairs_2.stake_keypair.pubkey(), + lamports_delegated: INITIAL_VALIDATOR_STAKES, + }]; + let stake_account = delegate_stake_helper( + &bank, + &delegator_0, + &validator_keypairs_2.vote_keypair.pubkey(), + 1_300_123_156, + ); + validator_2_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_0.pubkey(), + withdrawer_pubkey: delegator_0.pubkey(), + lamports_delegated: 1_300_123_156, + }); + let stake_account = delegate_stake_helper( + &bank, + &delegator_4, + &validator_keypairs_2.vote_keypair.pubkey(), + 1_610_565_420, + ); + validator_2_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_4.pubkey(), + withdrawer_pubkey: delegator_4.pubkey(), + lamports_delegated: 1_610_565_420, + }); + + /* 4. Run assertions */ + fn warmed_up(bank: &Bank, stake_pubkeys: &[Pubkey]) -> bool { + for stake_pubkey in stake_pubkeys { + let stake = + stake_state::stake_from(&bank.get_account(stake_pubkey).unwrap()).unwrap(); + + if stake.delegation.stake + != stake.stake( + bank.epoch(), + Some( + &from_account::<StakeHistory, _>( + &bank.get_account(&sysvar::stake_history::id()).unwrap(), + ) + .unwrap(), + ), + bank.new_warmup_cooldown_rate_epoch(), + ) + { + return false; + } + } + + true + } + fn next_epoch(bank: &Arc<Bank>) -> Arc<Bank> { + bank.squash(); + + Arc::new(Bank::new_from_parent( + bank.clone(), + &Pubkey::default(), + bank.get_slots_in_epoch(bank.epoch()) + bank.slot(), + )) + } + + let mut bank = Arc::new(bank); + let mut stake_pubkeys = validator_0_delegations + .iter() + .map(|v| v.stake_account_pubkey) + .collect::<Vec<Pubkey>>(); + stake_pubkeys.extend( + validator_1_delegations + .iter() + .map(|v| v.stake_account_pubkey), + ); + stake_pubkeys.extend( + validator_2_delegations + .iter() + .map(|v| v.stake_account_pubkey), + ); + loop { + if warmed_up(&bank, &stake_pubkeys[..]) { + break; + } + + // Cycle thru banks until we're fully warmed up + bank = next_epoch(&bank); + } + + let tip_distribution_account_0 = derive_tip_distribution_account_address( + &tip_distribution_program_id, + &validator_keypairs_0.vote_keypair.pubkey(), + bank.epoch(), + ); + let tip_distribution_account_1 = derive_tip_distribution_account_address( + &tip_distribution_program_id, + &validator_keypairs_1.vote_keypair.pubkey(), + bank.epoch(), + ); + let tip_distribution_account_2 = derive_tip_distribution_account_address( + &tip_distribution_program_id, + &validator_keypairs_2.vote_keypair.pubkey(), + bank.epoch(), + ); + + let expires_at = bank.epoch() + 3; + + let tda_0 = TipDistributionAccount { + validator_vote_account: validator_keypairs_0.vote_keypair.pubkey(), + merkle_root_upload_authority, + merkle_root: None, + epoch_created_at: bank.epoch(), + validator_commission_bps: 50, + expires_at, + bump: tip_distribution_account_0.1, + }; + let tda_1 = TipDistributionAccount { + validator_vote_account: validator_keypairs_1.vote_keypair.pubkey(),
merkle_root_upload_authority, + merkle_root: None, + epoch_created_at: bank.epoch(), + validator_commission_bps: 500, + expires_at: 0, + bump: tip_distribution_account_1.1, + }; + let tda_2 = TipDistributionAccount { + validator_vote_account: validator_keypairs_2.vote_keypair.pubkey(), + merkle_root_upload_authority, + merkle_root: None, + epoch_created_at: bank.epoch(), + validator_commission_bps: 75, + expires_at: 0, + bump: tip_distribution_account_2.1, + }; + + let tip_distro_0_tips = 1_000_000 * 10; + let tip_distro_1_tips = 69_000_420 * 10; + let tip_distro_2_tips = 789_000_111 * 10; + + let tda_0_fields = (tip_distribution_account_0.0, tda_0.validator_commission_bps); + let data_0 = + tda_to_account_shared_data(&tip_distribution_program_id, tip_distro_0_tips, tda_0); + let tda_1_fields = (tip_distribution_account_1.0, tda_1.validator_commission_bps); + let data_1 = + tda_to_account_shared_data(&tip_distribution_program_id, tip_distro_1_tips, tda_1); + let tda_2_fields = (tip_distribution_account_2.0, tda_2.validator_commission_bps); + let data_2 = + tda_to_account_shared_data(&tip_distribution_program_id, tip_distro_2_tips, tda_2); + + let accounts_data = create_config_account_data(&tip_payment_program_id, &bank); + for (pubkey, data) in accounts_data { + bank.store_account(&pubkey, &data); + } + + bank.store_account(&tip_distribution_account_0.0, &data_0); + bank.store_account(&tip_distribution_account_1.0, &data_1); + bank.store_account(&tip_distribution_account_2.0, &data_2); + + bank.freeze(); + let stake_meta_collection = generate_stake_meta_collection( + &bank, + &tip_distribution_program_id, + &tip_payment_program_id, + ) + .unwrap(); + assert_eq!( + stake_meta_collection.tip_distribution_program_id, + tip_distribution_program_id + ); + assert_eq!(stake_meta_collection.slot, bank.slot()); + assert_eq!(stake_meta_collection.epoch, bank.epoch()); + + let mut expected_stake_metas = HashMap::new(); + expected_stake_metas.insert( + validator_keypairs_0.vote_keypair.pubkey(), + StakeMeta { + validator_vote_account: validator_keypairs_0.vote_keypair.pubkey(), + delegations: validator_0_delegations.clone(), + total_delegated: validator_0_delegations + .iter() + .fold(0u64, |sum, delegation| { + sum.checked_add(delegation.lamports_delegated).unwrap() + }), + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_0_fields.0, + total_tips: tip_distro_0_tips + .checked_sub( + bank.get_minimum_balance_for_rent_exemption( + TipDistributionAccount::SIZE, + ), + ) + .unwrap(), + validator_fee_bps: tda_0_fields.1, + }), + commission: 0, + validator_node_pubkey: validator_keypairs_0.node_keypair.pubkey(), + }, + ); + expected_stake_metas.insert( + validator_keypairs_1.vote_keypair.pubkey(), + StakeMeta { + validator_vote_account: validator_keypairs_1.vote_keypair.pubkey(), + delegations: validator_1_delegations.clone(), + total_delegated: validator_1_delegations + .iter() + .fold(0u64, |sum, delegation| { + sum.checked_add(delegation.lamports_delegated).unwrap() + }), + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_1_fields.0, + total_tips: tip_distro_1_tips + .checked_sub( + bank.get_minimum_balance_for_rent_exemption( + TipDistributionAccount::SIZE, + ), + ) + .unwrap(), + validator_fee_bps: tda_1_fields.1, + }), + commission: 0, + validator_node_pubkey: validator_keypairs_1.node_keypair.pubkey(), + }, + ); + expected_stake_metas.insert( + 
validator_keypairs_2.vote_keypair.pubkey(), + StakeMeta { + validator_vote_account: validator_keypairs_2.vote_keypair.pubkey(), + delegations: validator_2_delegations.clone(), + total_delegated: validator_2_delegations + .iter() + .fold(0u64, |sum, delegation| { + sum.checked_add(delegation.lamports_delegated).unwrap() + }), + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_2_fields.0, + total_tips: tip_distro_2_tips + .checked_sub( + bank.get_minimum_balance_for_rent_exemption( + TipDistributionAccount::SIZE, + ), + ) + .unwrap(), + validator_fee_bps: tda_2_fields.1, + }), + commission: 0, + validator_node_pubkey: validator_keypairs_2.node_keypair.pubkey(), + }, + ); + + println!( + "validator_0 [vote_account={}, stake_account={}]", + validator_keypairs_0.vote_keypair.pubkey(), + validator_keypairs_0.stake_keypair.pubkey() + ); + println!( + "validator_1 [vote_account={}, stake_account={}]", + validator_keypairs_1.vote_keypair.pubkey(), + validator_keypairs_1.stake_keypair.pubkey() + ); + println!( + "validator_2 [vote_account={}, stake_account={}]", + validator_keypairs_2.vote_keypair.pubkey(), + validator_keypairs_2.stake_keypair.pubkey(), + ); + + assert_eq!( + expected_stake_metas.len(), + stake_meta_collection.stake_metas.len() + ); + + for actual_stake_meta in stake_meta_collection.stake_metas { + let expected_stake_meta = expected_stake_metas + .get(&actual_stake_meta.validator_vote_account) + .unwrap(); + assert_eq!( + expected_stake_meta.maybe_tip_distribution_meta, + actual_stake_meta.maybe_tip_distribution_meta + ); + assert_eq!( + expected_stake_meta.total_delegated, + actual_stake_meta.total_delegated + ); + assert_eq!(expected_stake_meta.commission, actual_stake_meta.commission); + assert_eq!( + expected_stake_meta.validator_vote_account, + actual_stake_meta.validator_vote_account + ); + + assert_eq!( + expected_stake_meta.delegations.len(), + actual_stake_meta.delegations.len() + ); + + for expected_delegation in &expected_stake_meta.delegations { + let actual_delegation = actual_stake_meta + .delegations + .iter() + .find(|d| d.stake_account_pubkey == expected_delegation.stake_account_pubkey) + .unwrap(); + + assert_eq!(expected_delegation, actual_delegation); + } + } + } + + /// Helper function that sends a delegate stake instruction to the bank. + /// Returns the created stake account pubkey. 
+ fn delegate_stake_helper( + bank: &Bank, + from_keypair: &Keypair, + vote_account: &Pubkey, + delegation_amount: u64, + ) -> Pubkey { + let minimum_delegation = solana_stake_program::get_minimum_delegation(&bank.feature_set); + assert!( + delegation_amount >= minimum_delegation, + "{}", + format!( + "received delegation_amount {}, must be at least {}", + delegation_amount, minimum_delegation + ) + ); + if let Some(from_account) = bank.get_account(&from_keypair.pubkey()) { + assert_eq!(from_account.owner(), &solana_sdk::system_program::id()); + } else { + panic!("from_account DNE"); + } + assert!(bank.get_account(vote_account).is_some()); + + let stake_keypair = Keypair::new(); + let instructions = stake::instruction::create_account_and_delegate_stake( + &from_keypair.pubkey(), + &stake_keypair.pubkey(), + vote_account, + &Authorized::auto(&from_keypair.pubkey()), + &Lockup::default(), + delegation_amount, + ); + + let message = Message::new(&instructions[..], Some(&from_keypair.pubkey())); + let transaction = Transaction::new( + &[from_keypair, &stake_keypair], + message, + bank.last_blockhash(), + ); + + bank.process_transaction(&transaction) + .map_err(|e| { + eprintln!("Error delegating stake [error={}]", e); + e + }) + .unwrap(); + + stake_keypair.pubkey() + } + + fn tda_to_account_shared_data( + tip_distribution_program_id: &Pubkey, + lamports: u64, + tda: TipDistributionAccount, + ) -> AccountSharedData { + let mut account_data = AccountSharedData::new( + lamports, + TipDistributionAccount::SIZE, + tip_distribution_program_id, + ); + + let mut data: [u8; TipDistributionAccount::SIZE] = [0u8; TipDistributionAccount::SIZE]; + let mut cursor = std::io::Cursor::new(&mut data[..]); + tda.try_serialize(&mut cursor).unwrap(); + + account_data.set_data(data.to_vec()); + account_data + } + + fn create_config_account_data( + tip_payment_program_id: &Pubkey, + bank: &Bank, + ) -> Vec<(Pubkey, AccountSharedData)> { + let mut account_datas = vec![]; + + let config_pda = + Pubkey::find_program_address(&[CONFIG_ACCOUNT_SEED], tip_payment_program_id); + + let tip_accounts = [ + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_0], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_1], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_2], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_3], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_4], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_5], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_6], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_7], tip_payment_program_id), + ]; + + let config = Config { + tip_receiver: Pubkey::new_unique(), + block_builder: Pubkey::new_unique(), + block_builder_commission_pct: 10, + bumps: InitBumps { + config: config_pda.1, + tip_payment_account_0: tip_accounts[0].1, + tip_payment_account_1: tip_accounts[1].1, + tip_payment_account_2: tip_accounts[2].1, + tip_payment_account_3: tip_accounts[3].1, + tip_payment_account_4: tip_accounts[4].1, + tip_payment_account_5: tip_accounts[5].1, + tip_payment_account_6: tip_accounts[6].1, + tip_payment_account_7: tip_accounts[7].1, + }, + }; + + let mut config_account_data = AccountSharedData::new( + bank.get_minimum_balance_for_rent_exemption(Config::SIZE), + Config::SIZE, + tip_payment_program_id, + ); + + let mut config_data: [u8; Config::SIZE] = [0u8; Config::SIZE]; + let mut config_cursor = 
std::io::Cursor::new(&mut config_data[..]); + config.try_serialize(&mut config_cursor).unwrap(); + config_account_data.set_data(config_data.to_vec()); + account_datas.push((config_pda.0, config_account_data)); + + account_datas.extend(tip_accounts.into_iter().map(|(pubkey, _)| { + let mut tip_account_data = AccountSharedData::new( + bank.get_minimum_balance_for_rent_exemption(TipPaymentAccount::SIZE), + TipPaymentAccount::SIZE, + tip_payment_program_id, + ); + + let mut data: [u8; TipPaymentAccount::SIZE] = [0u8; TipPaymentAccount::SIZE]; + let mut cursor = std::io::Cursor::new(&mut data[..]); + TipPaymentAccount::default() + .try_serialize(&mut cursor) + .unwrap(); + tip_account_data.set_data(data.to_vec()); + + (pubkey, tip_account_data) + })); + + account_datas + } +} diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 84654a564c..4715afb3c7 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -25,7 +25,7 @@ use { }, transaction_context::TransactionReturnData, }, - std::fmt, + std::{collections::HashMap, fmt}, thiserror::Error, }; @@ -278,6 +278,13 @@ impl From<InnerInstructions> for UiInnerInstructions { } } +#[derive(Default)] +pub struct PreBalanceInfo { + pub native: Vec<Vec<u64>>, + pub token: Vec<Vec<TransactionTokenBalance>>, + pub mint_decimals: HashMap<Pubkey, u8>, +} + #[derive(Clone, Debug, PartialEq)] pub struct TransactionTokenBalance { pub account_index: u8, diff --git a/turbine/benches/cluster_info.rs b/turbine/benches/cluster_info.rs index 1f15137175..fffca1126f 100644 --- a/turbine/benches/cluster_info.rs +++ b/turbine/benches/cluster_info.rs @@ -76,6 +76,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { &bank_forks, &SocketAddrSpace::Unspecified, &quic_endpoint_sender, + &None, ) .unwrap(); }); diff --git a/turbine/benches/retransmit_stage.rs b/turbine/benches/retransmit_stage.rs index bfd68239fe..44b3f918d1 100644 --- a/turbine/benches/retransmit_stage.rs +++ b/turbine/benches/retransmit_stage.rs @@ -32,7 +32,7 @@ use { net::{Ipv4Addr, UdpSocket}, sync::{ atomic::{AtomicUsize, Ordering}, - Arc, + Arc, RwLock, }, thread::{sleep, Builder}, time::Duration, @@ -123,6 +123,7 @@ fn bench_retransmitter(bencher: &mut Bencher) { shreds_receiver, Arc::default(), // solana_rpc::max_slots::MaxSlots None, + Arc::new(RwLock::new(None)), ); let mut index = 0; diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index cccda977b1..53701181c0 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -108,6 +108,7 @@ impl BroadcastStageType { bank_forks: Arc<RwLock<BankForks>>, shred_version: u16, quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, + shred_receiver_address: Arc<RwLock<Option<SocketAddr>>>, ) -> BroadcastStage { match self { BroadcastStageType::Standard => BroadcastStage::new( @@ -120,6 +121,7 @@ impl BroadcastStageType { bank_forks, quic_endpoint_sender, StandardBroadcastRun::new(shred_version), + shred_receiver_address, ), BroadcastStageType::FailEntryVerification => BroadcastStage::new( @@ -132,6 +134,7 @@ impl BroadcastStageType { bank_forks, quic_endpoint_sender, FailEntryVerificationBroadcastRun::new(shred_version), + Arc::new(RwLock::new(None)), ), BroadcastStageType::BroadcastFakeShreds => BroadcastStage::new( @@ -144,6 +147,7 @@ impl BroadcastStageType { bank_forks, quic_endpoint_sender, BroadcastFakeShredsRun::new(0, shred_version), + Arc::new(RwLock::new(None)), ), BroadcastStageType::BroadcastDuplicates(config) => BroadcastStage::new( @@ -156,6 +160,7 @@ impl BroadcastStageType { bank_forks, quic_endpoint_sender, BroadcastDuplicatesRun::new(shred_version, config.clone()), + Arc::new(RwLock::new(None)), ), } }
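A note on the new parameter: shred_receiver_address is shared as Arc<RwLock<Option<SocketAddr>>>, which suggests the destination can be set or cleared while the stage runs, along the lines of this hypothetical caller (not part of the patch): *shred_receiver_address.write().unwrap() = Some("127.0.0.1:9900".parse().unwrap());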
@@ -177,6 +182,7 @@ trait BroadcastRun { sock: &UdpSocket, bank_forks: &RwLock<BankForks>, quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, + shred_receiver_address: &Arc<RwLock<Option<SocketAddr>>>, ) -> Result<()>; fn record(&mut self, receiver: &RecordReceiver, blockstore: &Blockstore) -> Result<()>; } @@ -272,6 +278,7 @@ impl BroadcastStage { bank_forks: Arc<RwLock<BankForks>>, quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone, + shred_receiver_address: Arc<RwLock<Option<SocketAddr>>>, ) -> Self { let (socket_sender, socket_receiver) = unbounded(); let (blockstore_sender, blockstore_receiver) = unbounded(); @@ -303,6 +310,8 @@ impl BroadcastStage { let cluster_info = cluster_info.clone(); let bank_forks = bank_forks.clone(); let quic_endpoint_sender = quic_endpoint_sender.clone(); + let shred_receiver_address = shred_receiver_address.clone(); + + let run_transmit = move || loop { let res = bs_transmit.transmit( &socket_receiver, @@ -310,6 +319,7 @@ impl BroadcastStage { &sock, &bank_forks, &quic_endpoint_sender, + &shred_receiver_address, ); let res = Self::handle_error(res, "solana-broadcaster-transmit"); if let Some(res) = res { @@ -420,6 +430,7 @@ fn update_peer_stats( /// Broadcasts shreds from the leader (i.e. this node) to the root of the /// turbine retransmit tree for each shred. +#[allow(clippy::too_many_arguments)] pub fn broadcast_shreds( s: &UdpSocket, shreds: &[Shred], @@ -430,6 +441,7 @@ pub fn broadcast_shreds( bank_forks: &RwLock<BankForks>, socket_addr_space: &SocketAddrSpace, quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, + shred_receiver_address: &Option<SocketAddr>, ) -> Result<()> { let mut result = Ok(()); let mut shred_select = Measure::start("shred_select"); @@ -445,15 +457,34 @@ pub fn broadcast_shreds( let cluster_nodes = cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info); update_peer_stats(&cluster_nodes, last_datapoint_submit);
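With the flat_map rewrite below, each shred can fan out to as many as two destinations: the optional shred receiver (always over UDP, per the in-diff comment) plus the usual turbine broadcast peer over its negotiated protocol, where the old filter_map yielded at most the single turbine peer.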
- .tvu(protocol) - .ok() - .filter(|addr| socket_addr_space.check(addr)) - .map(|addr| { + + let mut addrs = Vec::with_capacity(2); + if let Some(shred_receiver_address) = shred_receiver_address { + // Assuming always over UDP for shred_receiver_address + addrs.push((Protocol::UDP, *shred_receiver_address)); + } + if let Some(peer) = cluster_nodes.get_broadcast_peer(&key) { + match protocol { + Protocol::QUIC => { + if let Ok(tvu) = peer.tvu(Protocol::QUIC) { + addrs.push((Protocol::QUIC, tvu)); + } + } + Protocol::UDP => { + if let Ok(tvu) = peer.tvu(Protocol::UDP) { + addrs.push((Protocol::UDP, tvu)); + } + } + } + } + + addrs + .into_iter() + .filter(|(_, a)| socket_addr_space.check(a)) + .map(move |(protocol, addr)| { (match protocol { Protocol::QUIC => Either::Right, Protocol::UDP => Either::Left, @@ -682,6 +713,7 @@ pub mod test { bank_forks, quic_endpoint_sender, StandardBroadcastRun::new(0), + Arc::new(RwLock::new(None)), ); MockBroadcastStage { @@ -721,7 +753,10 @@ pub mod test { let ticks = create_ticks(max_tick_height - start_tick_height, 0, Hash::default()); for (i, tick) in ticks.into_iter().enumerate() { entry_sender - .send((bank.clone(), (tick, i as u64 + 1))) + .send(WorkingBankEntry { + bank: bank.clone(), + entries_ticks: vec![(tick, i as u64 + 1)], + }) .expect("Expect successful send to broadcast service"); } } diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index 0db4003a07..6dfbe6f77d 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -280,6 +280,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { sock: &UdpSocket, bank_forks: &RwLock, _quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, + _shred_receiver_addr: &Arc>>, ) -> Result<()> { let (shreds, _) = receiver.recv()?; if shreds.is_empty() { diff --git a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs index 1464d46493..60795a85bf 100644 --- a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -133,6 +133,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { sock: &UdpSocket, _bank_forks: &RwLock, _quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, + _shred_receiver_addr: &Arc>>, ) -> Result<()> { for (data_shreds, batch_info) in receiver { let fake = batch_info.is_some(); diff --git a/turbine/src/broadcast_stage/broadcast_utils.rs b/turbine/src/broadcast_stage/broadcast_utils.rs index fe99077091..283104e826 100644 --- a/turbine/src/broadcast_stage/broadcast_utils.rs +++ b/turbine/src/broadcast_stage/broadcast_utils.rs @@ -36,13 +36,23 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result 32 * ShredData::capacity(/*merkle_proof_size*/ None).unwrap() as u64; let timer = Duration::new(1, 0); let recv_start = Instant::now(); - let (mut bank, (entry, mut last_tick_height)) = receiver.recv_timeout(timer)?; - let mut entries = vec![entry]; + + let WorkingBankEntry { + mut bank, + entries_ticks, + } = receiver.recv_timeout(timer)?; + let mut last_tick_height = entries_ticks.iter().last().unwrap().1; + let mut entries: Vec = entries_ticks.into_iter().map(|(e, _)| e).collect(); + assert!(last_tick_height <= bank.max_tick_height()); // Drain channel while last_tick_height != bank.max_tick_height() { - let Ok((try_bank, (entry, tick_height))) = receiver.try_recv() else { + let Ok(WorkingBankEntry { + bank: 
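/* the incoming batch's bank is bound as try_bank; the check below treats a bank change as an interrupted slot and clears the entries buffered so far */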
try_bank, + entries_ticks: new_entries_ticks, + }) = receiver.try_recv() + else { break; }; // If the bank changed, that implies the previous slot was interrupted and we do not have to @@ -52,8 +62,8 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result entries.clear(); bank = try_bank; } - last_tick_height = tick_height; - entries.push(entry); + last_tick_height = new_entries_ticks.iter().last().unwrap().1; + entries.extend(new_entries_ticks.into_iter().map(|(entry, _)| entry)); assert!(last_tick_height <= bank.max_tick_height()); } @@ -64,8 +74,10 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result while last_tick_height != bank.max_tick_height() && serialized_batch_byte_count < target_serialized_batch_byte_count { - let Ok((try_bank, (entry, tick_height))) = - receiver.recv_deadline(coalesce_start + ENTRY_COALESCE_DURATION) + let Ok(WorkingBankEntry { + bank: try_bank, + entries_ticks: new_entries_ticks, + }) = receiver.recv_deadline(coalesce_start + ENTRY_COALESCE_DURATION) else { break; }; @@ -78,10 +90,12 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result bank = try_bank; coalesce_start = Instant::now(); } - last_tick_height = tick_height; - let entry_bytes = serialized_size(&entry)?; - serialized_batch_byte_count += entry_bytes; - entries.push(entry); + last_tick_height = new_entries_ticks.iter().last().unwrap().1; + + for (entry, _) in &new_entries_ticks { + serialized_batch_byte_count += serialized_size(entry)?; + } + entries.extend(new_entries_ticks.into_iter().map(|(entry, _)| entry)); assert!(last_tick_height <= bank.max_tick_height()); } let time_coalesced = coalesce_start.elapsed(); @@ -138,7 +152,11 @@ mod tests { .map(|i| { let entry = Entry::new(&last_hash, 1, vec![tx.clone()]); last_hash = entry.hash; - s.send((bank1.clone(), (entry.clone(), i))).unwrap(); + s.send(WorkingBankEntry { + bank: bank1.clone(), + entries_ticks: vec![(entry.clone(), i)], + }) + .unwrap(); entry }) .collect(); @@ -172,11 +190,18 @@ mod tests { last_hash = entry.hash; // Interrupt slot 1 right before the last tick if tick_height == expected_last_height { - s.send((bank2.clone(), (entry.clone(), tick_height))) - .unwrap(); + s.send(WorkingBankEntry { + bank: bank2.clone(), + entries_ticks: vec![(entry.clone(), tick_height)], + }) + .unwrap(); Some(entry) } else { - s.send((bank1.clone(), (entry, tick_height))).unwrap(); + s.send(WorkingBankEntry { + bank: bank1.clone(), + entries_ticks: vec![(entry, tick_height)], + }) + .unwrap(); None } }) diff --git a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index 1dda981e69..215e619a5a 100644 --- a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -3,7 +3,7 @@ use { crate::cluster_nodes::ClusterNodesCache, solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, solana_sdk::{hash::Hash, signature::Keypair}, - std::{thread::sleep, time::Duration}, + std::{net::SocketAddr, thread::sleep, time::Duration}, tokio::sync::mpsc::Sender as AsyncSender, }; @@ -164,6 +164,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { sock: &UdpSocket, bank_forks: &RwLock, quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, + shred_receiver_address: &Arc>>, ) -> Result<()> { let (shreds, _) = receiver.recv()?; broadcast_shreds( @@ -176,6 +177,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { bank_forks, 
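/* the shred receiver lock is read once per received shred batch, so an address set at runtime through the admin RPC takes effect on the next batch */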
cluster_info.socket_addr_space(), quic_endpoint_sender, + &shred_receiver_address.read().unwrap(), ) } fn record(&mut self, receiver: &RecordReceiver, blockstore: &Blockstore) -> Result<()> { diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 37850d6ba9..96b447c4af 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -17,7 +17,7 @@ use { signature::Keypair, timing::{duration_as_us, AtomicInterval}, }, - std::{sync::RwLock, time::Duration}, + std::{net::SocketAddr, sync::RwLock, time::Duration}, tokio::sync::mpsc::Sender as AsyncSender, }; @@ -190,10 +190,24 @@ impl StandardBroadcastRun { let (ssend, srecv) = unbounded(); self.process_receive_results(keypair, blockstore, &ssend, &bsend, receive_results)?; //data - let _ = self.transmit(&srecv, cluster_info, sock, bank_forks, quic_endpoint_sender); + let _ = self.transmit( + &srecv, + cluster_info, + sock, + bank_forks, + quic_endpoint_sender, + &Arc::new(RwLock::new(None)), + ); let _ = self.record(&brecv, blockstore); //coding - let _ = self.transmit(&srecv, cluster_info, sock, bank_forks, quic_endpoint_sender); + let _ = self.transmit( + &srecv, + cluster_info, + sock, + bank_forks, + quic_endpoint_sender, + &Arc::new(RwLock::new(None)), + ); let _ = self.record(&brecv, blockstore); Ok(()) } @@ -391,6 +405,7 @@ impl StandardBroadcastRun { broadcast_shred_batch_info: Option, bank_forks: &RwLock, quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, + shred_receiver_addr: &Option, ) -> Result<()> { trace!("Broadcasting {:?} shreds", shreds.len()); let mut transmit_stats = TransmitShredsStats::default(); @@ -407,6 +422,7 @@ impl StandardBroadcastRun { bank_forks, cluster_info.socket_addr_space(), quic_endpoint_sender, + shred_receiver_addr, )?; transmit_time.stop(); @@ -477,6 +493,7 @@ impl BroadcastRun for StandardBroadcastRun { sock: &UdpSocket, bank_forks: &RwLock, quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, + shred_receiver_address: &Arc>>, ) -> Result<()> { let (shreds, batch_info) = receiver.recv()?; self.broadcast( @@ -486,6 +503,7 @@ impl BroadcastRun for StandardBroadcastRun { batch_info, bank_forks, quic_endpoint_sender, + &shred_receiver_address.read().unwrap(), ) } fn record(&mut self, receiver: &RecordReceiver, blockstore: &Blockstore) -> Result<()> { diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs index c4c7a751ab..78c5a9ce86 100644 --- a/turbine/src/retransmit_stage.rs +++ b/turbine/src/retransmit_stage.rs @@ -179,6 +179,7 @@ fn retransmit( shred_deduper: &mut ShredDeduper<2>, max_slots: &MaxSlots, rpc_subscriptions: Option<&RpcSubscriptions>, + shred_receiver_address: &Arc>>, ) -> Result<(), RecvTimeoutError> { const RECV_TIMEOUT: Duration = Duration::from_secs(1); let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?; @@ -259,6 +260,7 @@ fn retransmit( &sockets[index % sockets.len()], quic_endpoint_sender, stats, + &shred_receiver_address.read().unwrap(), ) .map_err(|err| { stats.record_error(&err); @@ -284,6 +286,7 @@ fn retransmit( &sockets[index % sockets.len()], quic_endpoint_sender, stats, + &shred_receiver_address.read().unwrap(), ) .map_err(|err| { stats.record_error(&err); @@ -303,6 +306,7 @@ fn retransmit( Ok(()) } +#[allow(clippy::too_many_arguments)] fn retransmit_shred( key: &ShredId, shred: &[u8], @@ -313,15 +317,20 @@ fn retransmit_shred( socket: &UdpSocket, quic_endpoint_sender: &AsyncSender<(SocketAddr, 
Bytes)>, stats: &RetransmitStats, + shred_receiver_addr: &Option, ) -> Result<(/*root_distance:*/ usize, /*num_nodes:*/ usize), Error> { let mut compute_turbine_peers = Measure::start("turbine_start"); let data_plane_fanout = cluster_nodes::get_data_plane_fanout(key.slot(), root_bank); let (root_distance, addrs) = cluster_nodes.get_retransmit_addrs(slot_leader, key, data_plane_fanout)?; - let addrs: Vec<_> = addrs + let mut addrs: Vec<_> = addrs .into_iter() .filter(|addr| socket_addr_space.check(addr)) .collect(); + if let Some(addr) = shred_receiver_addr { + addrs.push(*addr); + } + compute_turbine_peers.stop(); stats .compute_turbine_peers_total @@ -378,6 +387,7 @@ pub fn retransmitter( shreds_receiver: Receiver>>, max_slots: Arc, rpc_subscriptions: Option>, + shred_receiver_addr: Arc>>, ) -> JoinHandle<()> { let cluster_nodes_cache = ClusterNodesCache::::new( CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, @@ -409,6 +419,7 @@ pub fn retransmitter( &mut shred_deduper, &max_slots, rpc_subscriptions.as_deref(), + &shred_receiver_addr, ) { Ok(()) => (), Err(RecvTimeoutError::Timeout) => (), @@ -432,6 +443,7 @@ impl RetransmitStage { retransmit_receiver: Receiver>>, max_slots: Arc, rpc_subscriptions: Option>, + shred_receiver_addr: Arc>>, ) -> Self { let retransmit_thread_handle = retransmitter( retransmit_sockets, @@ -442,6 +454,7 @@ impl RetransmitStage { retransmit_receiver, max_slots, rpc_subscriptions, + shred_receiver_addr, ); Self { diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 845bdda7ee..5d1a3270a3 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -54,6 +54,7 @@ solana-rpc = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } +solana-runtime-plugin = { workspace = true } solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } solana-storage-bigtable = { workspace = true } @@ -64,6 +65,7 @@ solana-version = { workspace = true } solana-vote-program = { workspace = true } symlink = { workspace = true } thiserror = { workspace = true } +tonic = { workspace = true, features = ["tls", "tls-roots", "tls-webpki-roots"] } [dev-dependencies] solana-account-decoder = { workspace = true } diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index 6958482209..547f599316 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -13,6 +13,10 @@ use { solana_core::{ admin_rpc_post_init::AdminRpcRequestMetadataPostInit, consensus::{tower_storage::TowerStorage, Tower}, + proxy::{ + block_engine_stage::{BlockEngineConfig, BlockEngineStage}, + relayer_stage::{RelayerConfig, RelayerStage}, + }, validator::ValidatorStartProgress, }, solana_geyser_plugin_manager::GeyserPluginManagerRequest, @@ -30,6 +34,7 @@ use { fmt::{self, Display}, net::SocketAddr, path::{Path, PathBuf}, + str::FromStr, sync::{Arc, RwLock}, thread::{self, Builder}, time::{Duration, SystemTime}, @@ -233,6 +238,27 @@ pub trait AdminRpc { meta: Self::Metadata, public_tpu_forwards_addr: SocketAddr, ) -> Result<()>; + + #[rpc(meta, name = "setBlockEngineConfig")] + fn set_block_engine_config( + &self, + meta: Self::Metadata, + block_engine_url: String, + trust_packets: bool, + ) -> Result<()>; + + #[rpc(meta, name = "setRelayerConfig")] + fn set_relayer_config( + &self, + meta: Self::Metadata, + relayer_url: String, + trust_packets: bool, + expected_heartbeat_interval_ms: u64, + max_failed_heartbeats: u64, + ) -> Result<()>; + + 
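/* Accepts a host:port string; an empty string clears the stored address and disables the extra shred forwarding. */ +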
#[rpc(meta, name = "setShredReceiverAddress")] + fn set_shred_receiver_address(&self, meta: Self::Metadata, addr: String) -> Result<()>; } pub struct AdminRpcImpl; @@ -431,6 +457,30 @@ impl AdminRpc for AdminRpcImpl { Ok(()) } + fn set_block_engine_config( + &self, + meta: Self::Metadata, + block_engine_url: String, + trust_packets: bool, + ) -> Result<()> { + debug!("set_block_engine_config request received"); + let config = BlockEngineConfig { + block_engine_url, + trust_packets, + }; + // Detailed log messages are printed inside validate function + if BlockEngineStage::is_valid_block_engine_config(&config) { + meta.with_post_init(|post_init| { + *post_init.block_engine_config.lock().unwrap() = config; + Ok(()) + }) + } else { + Err(jsonrpc_core::error::Error::invalid_params( + "failed to set block engine config. see logs for details.", + )) + } + } + fn set_identity( &self, meta: Self::Metadata, @@ -465,6 +515,55 @@ impl AdminRpc for AdminRpcImpl { AdminRpcImpl::set_identity_keypair(meta, identity_keypair, require_tower) } + fn set_relayer_config( + &self, + meta: Self::Metadata, + relayer_url: String, + trust_packets: bool, + expected_heartbeat_interval_ms: u64, + max_failed_heartbeats: u64, + ) -> Result<()> { + debug!("set_relayer_config request received"); + let expected_heartbeat_interval = Duration::from_millis(expected_heartbeat_interval_ms); + let oldest_allowed_heartbeat = + Duration::from_millis(max_failed_heartbeats * expected_heartbeat_interval_ms); + let config = RelayerConfig { + relayer_url, + expected_heartbeat_interval, + oldest_allowed_heartbeat, + trust_packets, + }; + // Detailed log messages are printed inside validate function + if RelayerStage::is_valid_relayer_config(&config) { + meta.with_post_init(|post_init| { + *post_init.relayer_config.lock().unwrap() = config; + Ok(()) + }) + } else { + Err(jsonrpc_core::error::Error::invalid_params( + "failed to set relayer config. see logs for details.", + )) + } + } + + fn set_shred_receiver_address(&self, meta: Self::Metadata, addr: String) -> Result<()> { + let shred_receiver_address = if addr.is_empty() { + None + } else { + Some(SocketAddr::from_str(&addr).map_err(|_| { + jsonrpc_core::error::Error::invalid_params(format!( + "invalid shred receiver address: {}", + addr + )) + })?) 
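/* an empty addr maps to None above; the write below is observed by the broadcast and retransmit stages the next time they read the lock */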
+ }; + + meta.with_post_init(|post_init| { + *post_init.shred_receiver_address.write().unwrap() = shred_receiver_address; + Ok(()) + }) + } + fn set_staked_nodes_overrides(&self, meta: Self::Metadata, path: String) -> Result<()> { let loaded_config = load_staked_nodes_overrides(&path) .map_err(|err| { @@ -838,7 +937,10 @@ mod tests { solana_program::{program_option::COption, program_pack::Pack}, state::{Account as TokenAccount, AccountState as TokenAccountState, Mint}, }, - std::{collections::HashSet, sync::atomic::AtomicBool}, + std::{ + collections::HashSet, + sync::{atomic::AtomicBool, Mutex}, + }, }; #[derive(Default)] @@ -876,6 +978,9 @@ mod tests { let vote_account = vote_keypair.pubkey(); let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default())); let repair_whitelist = Arc::new(RwLock::new(HashSet::new())); + let block_engine_config = Arc::new(Mutex::new(BlockEngineConfig::default())); + let relayer_config = Arc::new(Mutex::new(RelayerConfig::default())); + let shred_receiver_address = Arc::new(RwLock::new(None)); let meta = AdminRpcRequestMetadata { rpc_addr: None, start_time: SystemTime::now(), @@ -888,6 +993,9 @@ mod tests { bank_forks: bank_forks.clone(), vote_account, repair_whitelist, + block_engine_config, + relayer_config, + shred_receiver_address, }))), staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())), rpc_to_plugin_manager_sender: None, diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index 88a45fdad5..26c2999c7f 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -814,12 +814,13 @@ fn get_highest_local_snapshot_hash( incremental_snapshot_archives_dir: impl AsRef, incremental_snapshot_fetch: bool, ) -> Option<(Slot, Hash)> { - snapshot_utils::get_highest_full_snapshot_archive_info(full_snapshot_archives_dir) + snapshot_utils::get_highest_full_snapshot_archive_info(full_snapshot_archives_dir, None) .and_then(|full_snapshot_info| { if incremental_snapshot_fetch { snapshot_utils::get_highest_incremental_snapshot_archive_info( incremental_snapshot_archives_dir, full_snapshot_info.slot(), + None, ) .map(|incremental_snapshot_info| { ( diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 2e40751840..dc233751ef 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -60,6 +60,10 @@ const MAX_SNAPSHOT_DOWNLOAD_ABORT: u32 = 5; // with less than 2 ticks per slot. const MINIMUM_TICKS_PER_SLOT: u64 = 2; +const DEFAULT_PREALLOCATED_BUNDLE_COST: &str = "3000000"; +const DEFAULT_RELAYER_EXPECTED_HEARTBEAT_INTERVAL_MS: &str = "500"; +const DEFAULT_RELAYER_MAX_FAILED_HEARTBEATS: &str = "3"; + pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { return App::new(crate_name!()).about(crate_description!()) .version(version) @@ -70,6 +74,87 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), ) + .arg( + Arg::with_name("block_engine_url") + .long("block-engine-url") + .help("Block engine url. Set to empty string to disable block engine connection.") + .takes_value(true) + ) + .arg( + Arg::with_name("relayer_url") + .long("relayer-url") + .help("Relayer url. Set to empty string to disable relayer connection.") + .takes_value(true) + ) + .arg( + Arg::with_name("trust_relayer_packets") + .long("trust-relayer-packets") + .takes_value(false) + .help("Skip signature verification on relayer packets. 
Not recommended unless the relayer is trusted.") + ) + .arg( + Arg::with_name("relayer_expected_heartbeat_interval_ms") + .long("relayer-expected-heartbeat-interval-ms") + .takes_value(true) + .help("Interval at which the Relayer is expected to send heartbeat messages.") + .default_value(DEFAULT_RELAYER_EXPECTED_HEARTBEAT_INTERVAL_MS) + ) + .arg( + Arg::with_name("relayer_max_failed_heartbeats") + .long("relayer-max-failed-heartbeats") + .takes_value(true) + .help("Maximum number of heartbeats the Relayer can miss before falling back to the normal TPU pipeline.") + .default_value(DEFAULT_RELAYER_MAX_FAILED_HEARTBEATS) + ) + .arg( + Arg::with_name("trust_block_engine_packets") + .long("trust-block-engine-packets") + .takes_value(false) + .help("Skip signature verification on block engine packets. Not recommended unless the block engine is trusted.") + ) + .arg( + Arg::with_name("tip_payment_program_pubkey") + .long("tip-payment-program-pubkey") + .value_name("TIP_PAYMENT_PROGRAM_PUBKEY") + .takes_value(true) + .help("The public key of the tip-payment program") + ) + .arg( + Arg::with_name("tip_distribution_program_pubkey") + .long("tip-distribution-program-pubkey") + .value_name("TIP_DISTRIBUTION_PROGRAM_PUBKEY") + .takes_value(true) + .help("The public key of the tip-distribution program.") + ) + .arg( + Arg::with_name("merkle_root_upload_authority") + .long("merkle-root-upload-authority") + .value_name("MERKLE_ROOT_UPLOAD_AUTHORITY") + .takes_value(true) + .help("The public key of the authorized merkle-root uploader.") + ) + .arg( + Arg::with_name("commission_bps") + .long("commission-bps") + .value_name("COMMISSION_BPS") + .takes_value(true) + .help("The commission validator takes from tips expressed in basis points.") + ) + .arg( + Arg::with_name("preallocated_bundle_cost") + .long("preallocated-bundle-cost") + .value_name("PREALLOCATED_BUNDLE_COST") + .takes_value(true) + .default_value(DEFAULT_PREALLOCATED_BUNDLE_COST) + .help("Number of CUs to allocate for bundles at beginning of slot.") + ) + .arg( + Arg::with_name("shred_receiver_address") + .long("shred-receiver-address") + .value_name("SHRED_RECEIVER_ADDRESS") + .takes_value(true) + .help("Validator will forward all shreds to this address in addition to normal turbine operation. Set to empty string to disable.") + ) .arg( Arg::with_name("identity") .short("i") @@ -1105,6 +1190,14 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .multiple(true) .help("Specify the configuration file for the Geyser plugin."), ) + .arg( + Arg::with_name("runtime_plugin_config") + .long("runtime-plugin-config") + .value_name("FILE") + .takes_value(true) + .multiple(true) + .help("Specify the configuration file for a Runtime plugin."), + ) .arg( Arg::with_name("snapshot_archive_format") .long("snapshot-archive-format") @@ -1394,6 +1487,68 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { ) .args(&get_deprecated_arguments()) .after_help("The default subcommand is run") + .subcommand( + SubCommand::with_name("set-block-engine-config") + .about("Set configuration for connection to a block engine") + .arg( + Arg::with_name("block_engine_url") + .long("block-engine-url") + .help("Block engine url. Set to empty string to disable block engine connection.") + .takes_value(true) + .required(true) + ) + .arg( + Arg::with_name("trust_block_engine_packets") + .long("trust-block-engine-packets") + .takes_value(false) + .help("Skip signature verification on block engine packets. 
Not recommended unless the block engine is trusted.") + ) + ) + .subcommand( + SubCommand::with_name("set-relayer-config") + .about("Set configuration for connection to a relayer") + .arg( + Arg::with_name("relayer_url") + .long("relayer-url") + .help("Relayer url. Set to empty string to disable relayer connection.") + .takes_value(true) + .required(true) + ) + .arg( + Arg::with_name("trust_relayer_packets") + .long("trust-relayer-packets") + .takes_value(false) + .help("Skip signature verification on relayer packets. Not recommended unless the relayer is trusted.") + ) + .arg( + Arg::with_name("relayer_expected_heartbeat_interval_ms") + .long("relayer-expected-heartbeat-interval-ms") + .takes_value(true) + .help("Interval at which the Relayer is expected to send heartbeat messages.") + .required(false) + .default_value(DEFAULT_RELAYER_EXPECTED_HEARTBEAT_INTERVAL_MS) + ) + .arg( + Arg::with_name("relayer_max_failed_heartbeats") + .long("relayer-max-failed-heartbeats") + .takes_value(true) + .help("Maximum number of heartbeats the Relayer can miss before falling back to the normal TPU pipeline.") + .required(false) + .default_value(DEFAULT_RELAYER_MAX_FAILED_HEARTBEATS) + ) + ) + .subcommand( + SubCommand::with_name("set-shred-receiver-address") + .about("Changes shred receiver address") + .arg( + Arg::with_name("shred_receiver_address") + .long("shred-receiver-address") + .value_name("SHRED_RECEIVER_ADDRESS") + .takes_value(true) + .help("Validator will forward all shreds to this address in addition to normal turbine operation. Set to empty string to disable.") + .required(true) + ) + ) .subcommand( SubCommand::with_name("exit") .about("Send an exit request to the validator") @@ -1532,6 +1687,48 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { SubCommand::with_name("run") .about("Run the validator") ) + .subcommand( + SubCommand::with_name("runtime-plugin") + .about("Manage and view runtime plugins") + .setting(AppSettings::SubcommandRequiredElseHelp) + .setting(AppSettings::InferSubcommands) + .subcommand( + SubCommand::with_name("list") + .about("List all currently running runtime plugins") + ) + .subcommand( + SubCommand::with_name("unload") + .about("Unload a particular runtime plugin. You must specify the runtime plugin name") + .arg( + Arg::with_name("name") + .required(true) + .takes_value(true) + ) + ) + .subcommand( + SubCommand::with_name("reload") + .about("Reload a particular runtime plugin. You must specify the runtime plugin name and the new config path") + .arg( + Arg::with_name("name") + .required(true) + .takes_value(true) + ) + .arg( + Arg::with_name("config") + .required(true) + .takes_value(true) + ) + ) + .subcommand( + SubCommand::with_name("load") + .about("Load a new runtime plugin. You must specify the config path.
Fails if overwriting (use reload)") + .arg( + Arg::with_name("config") + .required(true) + .takes_value(true) + ) + ) + ) .subcommand( SubCommand::with_name("plugin") .about("Manage and view geyser plugins") @@ -2483,6 +2680,14 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .multiple(true) .help("Specify the configuration file for the Geyser plugin."), ) + .arg( + Arg::with_name("runtime_plugin_config") + .long("runtime-plugin-config") + .value_name("FILE") + .takes_value(true) + .multiple(true) + .help("Specify the configuration file for a Runtime plugin."), + ) .arg( Arg::with_name("deactivate_feature") .long("deactivate-feature") diff --git a/validator/src/dashboard.rs b/validator/src/dashboard.rs index f6df5693c0..d9c42c2690 100644 --- a/validator/src/dashboard.rs +++ b/validator/src/dashboard.rs @@ -273,6 +273,7 @@ fn get_validator_stats( Ok(()) => "ok".to_string(), Err(err) => { if let client_error::ErrorKind::RpcError(request::RpcError::RpcResponseError { + request_id: _, code: _, message: _, data: diff --git a/validator/src/main.rs b/validator/src/main.rs index f5b0ce1a06..e91bc36c0f 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1,10 +1,12 @@ #![allow(clippy::arithmetic_side_effects)] + #[cfg(not(target_env = "msvc"))] use jemallocator::Jemalloc; use { clap::{crate_name, value_t, value_t_or_exit, values_t, values_t_or_exit, ArgMatches}, console::style, crossbeam_channel::unbounded, + jsonrpc_server_utils::tokio::runtime::Runtime, log::*, rand::{seq::SliceRandom, thread_rng}, solana_accounts_db::{ @@ -23,7 +25,9 @@ use { banking_trace::DISABLED_BAKING_TRACE_DIR, consensus::tower_storage, ledger_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS}, + proxy::{block_engine_stage::BlockEngineConfig, relayer_stage::RelayerConfig}, system_monitor_service::SystemMonitorService, + tip_manager::{TipDistributionAccountConfig, TipManagerConfig}, tpu::DEFAULT_TPU_COALESCE, validator::{ is_snapshot_config_valid, BlockProductionMethod, BlockVerificationMethod, Validator, @@ -55,6 +59,10 @@ use { ArchiveFormat, SnapshotVersion, }, }, + solana_runtime_plugin::{ + runtime_plugin_admin_rpc_service, + runtime_plugin_admin_rpc_service::RuntimePluginAdminRpcRequestMetadata, + }, solana_sdk::{ clock::{Slot, DEFAULT_S_PER_SLOT}, commitment_config::CommitmentConfig, @@ -83,7 +91,7 @@ use { path::{Path, PathBuf}, process::exit, str::FromStr, - sync::{Arc, RwLock}, + sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, time::{Duration, SystemTime}, }, }; @@ -473,6 +481,60 @@ pub fn main() { let operation = match matches.subcommand() { ("", _) | ("run", _) => Operation::Run, + ("set-block-engine-config", Some(subcommand_matches)) => { + let block_engine_url = value_t_or_exit!(subcommand_matches, "block_engine_url", String); + let trust_packets = subcommand_matches.is_present("trust_block_engine_packets"); + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { + admin_client + .await? 
+ .set_block_engine_config(block_engine_url, trust_packets) + .await + }) + .unwrap_or_else(|err| { + println!("set block engine config failed: {}", err); + exit(1); + }); + return; + } + ("set-relayer-config", Some(subcommand_matches)) => { + let relayer_url = value_t_or_exit!(subcommand_matches, "relayer_url", String); + let trust_packets = subcommand_matches.is_present("trust_relayer_packets"); + let expected_heartbeat_interval_ms: u64 = + value_of(subcommand_matches, "relayer_expected_heartbeat_interval_ms").unwrap(); + let max_failed_heartbeats: u64 = + value_of(subcommand_matches, "relayer_max_failed_heartbeats").unwrap(); + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { + admin_client + .await? + .set_relayer_config( + relayer_url, + trust_packets, + expected_heartbeat_interval_ms, + max_failed_heartbeats, + ) + .await + }) + .unwrap_or_else(|err| { + println!("set relayer config failed: {}", err); + exit(1); + }); + return; + } + ("set-shred-receiver-address", Some(subcommand_matches)) => { + let addr = value_t_or_exit!(subcommand_matches, "shred_receiver_address", String); + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { admin_client.await?.set_shred_receiver_address(addr).await }) + .unwrap_or_else(|err| { + println!("set shred receiver address failed: {}", err); + exit(1); + }); + return; + } ("authorized-voter", Some(authorized_voter_subcommand_matches)) => { match authorized_voter_subcommand_matches.subcommand() { ("add", Some(subcommand_matches)) => { @@ -624,6 +686,92 @@ pub fn main() { _ => unreachable!(), } } + ("runtime-plugin", Some(plugin_subcommand_matches)) => { + let runtime_plugin_rpc_client = runtime_plugin_admin_rpc_service::connect(&ledger_path); + let runtime = Runtime::new().unwrap(); + match plugin_subcommand_matches.subcommand() { + ("list", _) => { + let plugins = runtime + .block_on( + async move { runtime_plugin_rpc_client.await?.list_plugins().await }, + ) + .unwrap_or_else(|err| { + println!("Failed to list plugins: {err}"); + exit(1); + }); + if !plugins.is_empty() { + println!("Currently the following plugins are loaded:"); + for (plugin, i) in plugins.into_iter().zip(1..) { + println!(" {i}) {plugin}"); + } + } else { + println!("There are currently no plugins loaded"); + } + return; + } + ("unload", Some(subcommand_matches)) => { + if let Ok(name) = value_t!(subcommand_matches, "name", String) { + runtime + .block_on(async { + runtime_plugin_rpc_client + .await? + .unload_plugin(name.clone()) + .await + }) + .unwrap_or_else(|err| { + println!("Failed to unload plugin {name}: {err:?}"); + exit(1); + }); + println!("Successfully unloaded plugin: {name}"); + } + return; + } + ("load", Some(subcommand_matches)) => { + if let Ok(config) = value_t!(subcommand_matches, "config", String) { + let name = runtime + .block_on(async { + runtime_plugin_rpc_client + .await? + .load_plugin(config.clone()) + .await + }) + .unwrap_or_else(|err| { + println!("Failed to load plugin {config}: {err:?}"); + exit(1); + }); + println!("Successfully loaded plugin: {name}"); + } + return; + } + ("reload", Some(subcommand_matches)) => { + if let Ok(name) = value_t!(subcommand_matches, "name", String) { + if let Ok(config) = value_t!(subcommand_matches, "config", String) { + println!( + "This command does not work as intended on some systems. \ + To correctly reload an existing plugin make sure to:\n\ + 1.
Rename the new plugin binary file.\n\ + 2. Unload the previous version.\n\ + 3. Load the new, renamed binary using the 'Load' command." + ); + runtime + .block_on(async { + runtime_plugin_rpc_client + .await? + .reload_plugin(name.clone(), config.clone()) + .await + }) + .unwrap_or_else(|err| { + println!("Failed to reload plugin {name}: {err:?}"); + exit(1); + }); + println!("Successfully reloaded plugin: {name}"); + } + } + return; + } + _ => unreachable!(), + } + } ("contact-info", Some(subcommand_matches)) => { let output_mode = subcommand_matches.value_of("output"); let admin_client = admin_rpc_service::connect(&ledger_path); @@ -1268,6 +1416,44 @@ pub fn main() { } let full_api = matches.is_present("full_rpc_api"); + let voting_disabled = matches.is_present("no_voting") || restricted_repair_only_mode; + let tip_manager_config = tip_manager_config_from_matches(&matches, voting_disabled); + + let block_engine_config = BlockEngineConfig { + block_engine_url: if matches.is_present("block_engine_url") { + value_of(&matches, "block_engine_url").expect("couldn't parse block_engine_url") + } else { + "".to_string() + }, + trust_packets: matches.is_present("trust_block_engine_packets"), + }; + + // Defaults are set in cli definition, safe to use unwrap() here + let expected_heartbeat_interval_ms: u64 = + value_of(&matches, "relayer_expected_heartbeat_interval_ms").unwrap(); + assert!( + expected_heartbeat_interval_ms > 0, + "relayer-expected-heartbeat-interval-ms must be greater than zero" + ); + let max_failed_heartbeats: u64 = value_of(&matches, "relayer_max_failed_heartbeats").unwrap(); + assert!( + max_failed_heartbeats > 0, + "relayer-max-failed-heartbeats must be greater than zero" + ); + + let relayer_config = RelayerConfig { + relayer_url: if matches.is_present("relayer_url") { + value_of(&matches, "relayer_url").expect("couldn't parse relayer_url") + } else { + "".to_string() + }, + expected_heartbeat_interval: Duration::from_millis(expected_heartbeat_interval_ms), + oldest_allowed_heartbeat: Duration::from_millis( + max_failed_heartbeats * expected_heartbeat_interval_ms, + ), + trust_packets: matches.is_present("trust_relayer_packets"), + }; + let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), tower_storage, @@ -1400,6 +1586,14 @@ pub fn main() { log_messages_bytes_limit: value_of(&matches, "log_messages_bytes_limit"), ..RuntimeConfig::default() }, + relayer_config: Arc::new(Mutex::new(relayer_config)), + block_engine_config: Arc::new(Mutex::new(block_engine_config)), + tip_manager_config, + shred_receiver_address: Arc::new(RwLock::new( + matches + .value_of("shred_receiver_address") + .map(|addr| SocketAddr::from_str(addr).expect("shred_receiver_address invalid")), + )), staked_nodes_overrides: staked_nodes_overrides.clone(), replay_slots_concurrently: matches.is_present("replay_slots_concurrently"), use_snapshot_archives_at_startup: value_t_or_exit!( @@ -1407,6 +1601,8 @@ pub fn main() { use_snapshot_archives_at_startup::cli::NAME, UseSnapshotArchivesAtStartup ), + preallocated_bundle_cost: value_of(&matches, "preallocated_bundle_cost") + .expect("preallocated_bundle_cost set as default"), ..ValidatorConfig::default() }; @@ -1713,6 +1909,31 @@ pub fn main() { }, ); + let runtime_plugin_config_and_rpc_rx = { + let plugin_exit = Arc::new(AtomicBool::new(false)); + let (rpc_request_sender, rpc_request_receiver) = unbounded(); + solana_runtime_plugin::runtime_plugin_admin_rpc_service::run( + &ledger_path, + RuntimePluginAdminRpcRequestMetadata { +
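/* sender half of the plugin-admin channel; the matching rpc_request_receiver is bundled into runtime_plugin_config_and_rpc_rx and handed to the validator further down */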
rpc_request_sender, + validator_exit: validator_config.validator_exit.clone(), + }, + plugin_exit, + ); + + if matches.is_present("runtime_plugin_config") { + ( + values_t_or_exit!(matches, "runtime_plugin_config", String) + .into_iter() + .map(PathBuf::from) + .collect(), + rpc_request_receiver, + ) + } else { + (vec![], rpc_request_receiver) + } + }; + let gossip_host: IpAddr = matches .value_of("gossip_host") .map(|gossip_host| { @@ -1885,6 +2106,7 @@ pub fn main() { tpu_connection_pool_size, tpu_enable_udp, admin_service_post_init, + Some(runtime_plugin_config_and_rpc_rx), ) .unwrap_or_else(|e| { error!("Failed to start validator: {:?}", e); @@ -1950,3 +2172,47 @@ fn process_account_indexes(matches: &ArgMatches) -> AccountSecondaryIndexes { indexes: account_indexes, } } + +fn tip_manager_config_from_matches( + matches: &ArgMatches, + voting_disabled: bool, +) -> TipManagerConfig { + TipManagerConfig { + tip_payment_program_id: pubkey_of(matches, "tip_payment_program_pubkey").unwrap_or_else( + || { + if !voting_disabled { + panic!("--tip-payment-program-pubkey argument required when validator is voting"); + } + Pubkey::new_unique() + }, + ), + tip_distribution_program_id: pubkey_of(matches, "tip_distribution_program_pubkey") + .unwrap_or_else(|| { + if !voting_disabled { + panic!("--tip-distribution-program-pubkey argument required when validator is voting"); + } + Pubkey::new_unique() + }), + tip_distribution_account_config: TipDistributionAccountConfig { + merkle_root_upload_authority: pubkey_of(matches, "merkle_root_upload_authority") + .unwrap_or_else(|| { + if !voting_disabled { + panic!("--merkle-root-upload-authority argument required when validator is voting"); + } + Pubkey::new_unique() + }), + vote_account: pubkey_of(matches, "vote_account").unwrap_or_else(|| { + if !voting_disabled { + panic!("--vote-account argument required when validator is voting"); + } + Pubkey::new_unique() + }), + commission_bps: value_t!(matches, "commission_bps", u16).unwrap_or_else(|_| { + if !voting_disabled { + panic!("--commission-bps argument required when validator is voting"); + } + 0 + }), + }, + } +} diff --git a/version/src/lib.rs b/version/src/lib.rs index edeca08c96..68ce039318 100644 --- a/version/src/lib.rs +++ b/version/src/lib.rs @@ -63,7 +63,7 @@ impl Default for Version { commit: compute_commit(option_env!("CI_COMMIT")).unwrap_or_default(), feature_set, // Other client implementations need to modify this line. - client: u16::try_from(ClientId::SolanaLabs).unwrap(), + client: u16::try_from(ClientId::JitoLabs).unwrap(), } } }
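Reviewer note: the `shred_receiver_address: Arc<RwLock<Option<SocketAddr>>>` threaded through broadcast_stage.rs and retransmit_stage.rs above reduces to one small fan-out rule. The sketch below is a minimal, self-contained distillation of that rule; `shred_destinations` is a hypothetical helper (not a function in this diff), and it deliberately omits the QUIC/UDP protocol selection and SocketAddrSpace filtering that the real broadcast_shreds performs.

use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};

// Hypothetical distillation of the fan-out added in broadcast_shreds():
// every shred still goes to its turbine broadcast peer; when an operator
// has configured a shred receiver, the same shred is also sent there
// (the diff assumes the extra receiver is always reached over UDP).
fn shred_destinations(
    turbine_peer: Option<SocketAddr>,
    shred_receiver_address: &Option<SocketAddr>,
) -> Vec<SocketAddr> {
    let mut addrs = Vec::with_capacity(2);
    if let Some(receiver) = shred_receiver_address {
        addrs.push(*receiver); // extra fan-out, independent of turbine
    }
    if let Some(peer) = turbine_peer {
        addrs.push(peer); // normal turbine root for this shred
    }
    addrs
}

fn main() {
    let peer = Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 8001)));
    let receiver = Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 9900)));
    assert_eq!(shred_destinations(peer, &receiver).len(), 2);
    assert_eq!(shred_destinations(peer, &None).len(), 1); // receiver unset: turbine only
}

At runtime the address is flipped through the admin interface added above, e.g. something like `solana-validator --ledger <LEDGER> set-shred-receiver-address --shred-receiver-address 127.0.0.1:9900` (an empty string disables forwarding); the request lands in AdminRpcImpl::set_shred_receiver_address, and the broadcast and retransmit paths observe the new value on their next RwLock read.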