diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 99daeeca9d5..7b606dd0f61 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -9,7 +9,8 @@ on: env: DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - IMAGE_NAME: sigp/lighthouse + IMAGE_NAME: ${{ github.repository_owner}}/lighthouse + LCLI_IMAGE_NAME: ${{ github.repository_owner }}/lcli jobs: extract-branch-name: @@ -96,3 +97,22 @@ jobs: --amend ${IMAGE_NAME}:latest-arm64${TAG_SUFFIX} \ --amend ${IMAGE_NAME}:latest-amd64${TAG_SUFFIX}; docker manifest push ${IMAGE_NAME}:latest${TAG_SUFFIX} + build-docker-lcli: + runs-on: ubuntu-18.04 + needs: [extract-branch-name] + steps: + - uses: actions/checkout@v2 + - name: Dockerhub login + run: | + echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin + - name: Set Env + if: needs.extract-branch-name.outputs.BRANCH_NAME == 'unstable' + run: | + echo "TAG_SUFFIX=-unstable" >> $GITHUB_ENV; + - name: Build lcli dockerfile (with push) + run: | + docker build \ + --build-arg PORTABLE=true \ + --tag ${LCLI_IMAGE_NAME}:latest${TAG_SUFFIX} \ + --file ./lcli/Dockerfile . + docker push ${LCLI_IMAGE_NAME}:latest${TAG_SUFFIX} diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 82c1a81e33f..7f3ca3b0aae 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -146,6 +146,28 @@ jobs: run: sudo npm install -g ganache-cli - name: Run the syncing simulator run: cargo run --release --bin simulator syncing-sim + doppelganger-protection-test: + name: doppelganger-protection-test + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install ganache-cli + run: sudo npm install -g ganache-cli + - name: Install lighthouse and lcli + run: | + make + make install-lcli + - name: Run the doppelganger protection success test script + run: | + cd scripts/tests + ./doppelganger_protection.sh success + - name: Run the doppelganger protection failure test script + run: | + cd scripts/tests + ./doppelganger_protection.sh failure check-benchmarks: name: check-benchmarks runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 8a8b14e51e4..8cb2cfeba93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -83,31 +83,11 @@ checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "aead" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "922b33332f54fc0ad13fa3e514601e8d30fb54e1f3eadc36643f6526db645621" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "aes" -version = "0.5.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" +checksum = "6e3e798aa0c8239776f54415bc06f3d74b1850f3f830b45c35cfc80556973f70" dependencies = [ - "aes-soft", - "aesni", - "block-cipher", + "generic-array", ] [[package]] @@ -120,20 +100,7 @@ dependencies = [ "cipher 0.3.0", "cpufeatures", "ctr", - "opaque-debug 0.3.0", -] - -[[package]] -name = "aes-gcm" -version = "0.7.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" -dependencies = [ - "aead 0.3.2", - "aes 0.5.0", - "block-cipher", - "ghash 0.3.1", - "subtle 2.4.0", + "opaque-debug", ] [[package]] @@ -142,33 +109,12 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" dependencies = [ - "aead 0.4.1", - "aes 0.7.4", + "aead", + "aes", "cipher 0.3.0", "ctr", - "ghash 0.4.2", - "subtle 2.4.0", -] - -[[package]] -name = "aes-soft" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6" -dependencies = [ - "block-cipher", - "byteorder", - "opaque-debug 0.3.0", -] - -[[package]] -name = "aesni" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a" -dependencies = [ - "block-cipher", - "opaque-debug 0.3.0", + "ghash", + "subtle", ] [[package]] @@ -211,9 +157,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61" +checksum = "595d3cfa7a60d4555cb5067b99f07142a08ea778de5cf993f7b75c7d8fabc486" [[package]] name = "arbitrary" @@ -257,31 +203,12 @@ dependencies = [ "term", ] -[[package]] -name = "asn1_der" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" -dependencies = [ - "asn1_der_derive", -] - [[package]] name = "asn1_der" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" -[[package]] -name = "asn1_der_derive" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" -dependencies = [ - "quote", - "syn", -] - [[package]] name = "assert-json-diff" version = "2.0.1" @@ -424,6 +351,20 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "async-std-resolver" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed4e2c3da14d8ad45acb1e3191db7a918e9505b6f155b218e70a7c9a1a48c638" +dependencies = [ + "async-std", + "async-trait", + "futures-io", + "futures-util", + "pin-utils", + "trust-dns-resolver", +] + [[package]] name = "async-task" version = "4.0.3" @@ -711,20 +652,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" dependencies = [ "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.4", + "digest", + "opaque-debug", ] [[package]] @@ -733,26 +662,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", 
- "generic-array 0.14.4", -] - -[[package]] -name = "block-cipher" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", + "block-padding", + "generic-array", ] [[package]] @@ -875,12 +786,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - [[package]] name = "byteorder" version = "1.4.3" @@ -943,18 +848,18 @@ dependencies = [ [[package]] name = "cast" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57cdfa5d50aad6cb4d44dcab6101a7f79925bd59d82ca42f38a9856a28865374" +checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" dependencies = [ - "rustc_version 0.3.3", + "rustc_version 0.4.0", ] [[package]] name = "cc" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" [[package]] name = "cfg-if" @@ -970,24 +875,26 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.5.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" +checksum = "fee7ad89dc1128635074c268ee661f90c3f7e83d9fd12910608c36b47d6c3412" dependencies = [ - "stream-cipher", + "cfg-if 1.0.0", + "cipher 0.3.0", + "cpufeatures", "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" +checksum = "1580317203210c517b6d44794abfbe600698276db18127e37ad3e69bf5e848e5" dependencies = [ - "aead 0.3.2", + "aead", "chacha20", + "cipher 0.3.0", "poly1305", - "stream-cipher", "zeroize", ] @@ -1010,7 +917,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] @@ -1019,7 +926,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] @@ -1190,12 +1097,6 @@ dependencies = [ "libc", ] -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - [[package]] name = "crc32fast" version = "1.2.1" @@ -1291,24 +1192,14 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array 0.12.4", - "subtle 1.0.0", -] - [[package]] name = "crypto-mac" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] [[package]] @@ -1317,8 +1208,8 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] [[package]] @@ -1327,8 +1218,8 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] [[package]] @@ -1382,6 +1273,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "cuckoofilter" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" +dependencies = [ + "byteorder", + "fnv", + "rand 0.7.3", +] + [[package]] name = "curl" version = "0.4.38" @@ -1420,9 +1322,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "639891fde0dbea823fc3d798a0fdf9d2f9440a42d64a78ab3488b0ca025117b3" dependencies = [ "byteorder", - "digest 0.9.0", + "digest", "rand_core 0.5.1", - "subtle 2.4.0", + "subtle", "zeroize", ] @@ -1502,7 +1404,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.9.5", + "sha2", "tree_hash", "types", ] @@ -1541,13 +1443,14 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.14" +version = "0.99.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc7b9cef1e351660e5443924e4f43ab25fbbed3e9a5f052df3677deb4d6b320" +checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" dependencies = [ "convert_case", "proc-macro2", "quote", + "rustc_version 0.3.3", "syn", ] @@ -1563,22 +1466,13 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] @@ -1640,27 +1534,27 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "discv5" -version = "0.1.0-beta.5" +version = "0.1.0-beta.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f5a5132ff1173d356fd78d09cd33d82fe8f7e6b4016d8c891edf2680a8cebe6" +checksum = "c594d301eb954fc101dec75f59a5291761e4ffe6169b249523b1086b9779b91f" dependencies = [ - "aes 0.7.4", - "aes-gcm 
0.9.2", + "aes", + "aes-gcm", "arrayvec 0.7.1", - "digest 0.9.0", + "digest", "enr", "fnv", "futures", "hex", "hkdf", "lazy_static", - "libp2p-core 0.28.3", + "libp2p-core 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", "lru", "lru_time_cache", "parking_lot", "rand 0.8.4", "rlp 0.5.0", - "sha2 0.9.5", + "sha2", "smallvec", "tokio 1.8.1", "tokio-stream", @@ -1671,6 +1565,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dns-parser" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" +dependencies = [ + "byteorder", + "quick-error", +] + [[package]] name = "dtoa" version = "0.4.8" @@ -1708,7 +1612,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.5", + "sha2", "zeroize", ] @@ -1754,11 +1658,11 @@ checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" dependencies = [ "bitvec 0.20.4", "ff", - "generic-array 0.14.4", + "generic-array", "group", "pkcs8", "rand_core 0.6.3", - "subtle 2.4.0", + "subtle", "zeroize", ] @@ -1800,6 +1704,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "enum-as-inner" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "env_logger" version = "0.7.1" @@ -1945,7 +1861,7 @@ dependencies = [ "lazy_static", "ring", "rustc-hex", - "sha2 0.9.5", + "sha2", "wasm-bindgen-test", ] @@ -1972,7 +1888,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.9.5", + "sha2", "zeroize", ] @@ -1980,7 +1896,7 @@ dependencies = [ name = "eth2_keystore" version = "0.1.0" dependencies = [ - "aes 0.7.4", + "aes", "bls", "eth2_key_derivation", "eth2_ssz", @@ -1992,7 +1908,7 @@ dependencies = [ "serde", "serde_json", "serde_repr", - "sha2 0.9.5", + "sha2", "tempfile", "unicode-normalization", "uuid", @@ -2027,7 +1943,7 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 0.9.5", + "sha2", "slog", "slog-async", "slog-term", @@ -2133,9 +2049,9 @@ dependencies = [ [[package]] name = "ethabi" -version = "14.0.0" +version = "14.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c52991643379afc90bfe2df3c64d53983e59c35a82ba6e75c997cfc2880d8524" +checksum = "a01317735d563b3bad2d5f90d2e1799f414165408251abb762510f40e790e69a" dependencies = [ "anyhow", "ethereum-types 0.11.0", @@ -2216,12 +2132,6 @@ dependencies = [ "futures", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fallback" version = "0.1.0" @@ -2258,7 +2168,7 @@ checksum = "72a4d941a5b7c2a75222e2d44fcdf634a67133d9db31e177ae5ff6ecda852bfe" dependencies = [ "bitvec 0.20.4", "rand_core 0.6.3", - "subtle 2.4.0", + "subtle", ] [[package]] @@ -2513,15 +2423,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.4" @@ -2580,24 +2481,14 @@ dependencies = [ "wasi 0.10.2+wasi-snapshot-preview1", ] -[[package]] -name = "ghash" -version = "0.3.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" -dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.4.5", -] - [[package]] name = "ghash" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bbd60caa311237d508927dbba7594b483db3ef05faa55172fcf89b1bcda7853" dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.5.1", + "opaque-debug", + "polyval", ] [[package]] @@ -2655,7 +2546,7 @@ checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" dependencies = [ "ff", "rand_core 0.6.3", - "subtle 2.4.0", + "subtle", ] [[package]] @@ -2777,20 +2668,10 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01706d578d5c281058480e673ae4086a9f4710d8df1ad80a5b03e39ece5f886b" dependencies = [ - "digest 0.9.0", + "digest", "hmac 0.11.0", ] -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac 0.7.0", - "digest 0.8.1", -] - [[package]] name = "hmac" version = "0.8.1" @@ -2798,7 +2679,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ "crypto-mac 0.8.0", - "digest 0.9.0", + "digest", ] [[package]] @@ -2808,7 +2689,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" dependencies = [ "crypto-mac 0.10.0", - "digest 0.9.0", + "digest", ] [[package]] @@ -2818,18 +2699,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac 0.11.0", - "digest 0.9.0", + "digest", ] [[package]] name = "hmac-drbg" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ - "digest 0.8.1", - "generic-array 0.12.4", - "hmac 0.7.1", + "digest", + "generic-array", + "hmac 0.8.1", +] + +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi", ] [[package]] @@ -2860,7 +2752,6 @@ version = "0.1.0" dependencies = [ "beacon_chain", "bs58", - "discv5", "environment", "eth1", "eth2", @@ -2960,9 +2851,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.9" +version = "0.14.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07d6baa1b441335f3ce5098ac421fb6547c46dda735ca1bc6d0153c838f9dd83" +checksum = "7728a72c4c7d72665fde02204bcbd93b247721025b222ef78606f14513e0fd03" dependencies = [ "bytes 1.0.1", "futures-channel", @@ -3035,9 +2926,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "0.1.8" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" +checksum = "ae8ab7f67bad3240049cb24fb9cb0b4c2c6af4c245840917fbbdededeee91179" dependencies 
= [ "async-io", "futures", @@ -3139,9 +3030,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" dependencies = [ "cfg-if 1.0.0", ] @@ -3164,6 +3055,18 @@ dependencies = [ "num-traits", ] +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2 0.3.19", + "widestring", + "winapi", + "winreg 0.6.2", +] + [[package]] name = "ipnet" version = "2.3.1" @@ -3253,7 +3156,7 @@ dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sha2 0.9.5", + "sha2", ] [[package]] @@ -3379,9 +3282,9 @@ checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" [[package]] name = "libc" -version = "0.2.97" +version = "0.2.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" +checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" [[package]] name = "libflate" @@ -3421,26 +3324,36 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.35.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc225a49973cf9ab10d0cdd6a4b8f0cda299df9b760824bbb623f15f8f0c95a" +version = "0.39.1" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "atomic", "bytes 1.0.1", "futures", "lazy_static", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-deflate", "libp2p-dns", + "libp2p-floodsub", "libp2p-gossipsub", "libp2p-identify", + "libp2p-kad", + "libp2p-mdns", "libp2p-mplex", "libp2p-noise", + "libp2p-ping", + "libp2p-plaintext", + "libp2p-pnet", + "libp2p-relay", + "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", "libp2p-tcp", + "libp2p-uds", + "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "parity-multiaddr", + "multiaddr", "parking_lot", "pin-project 1.0.7", "smallvec", @@ -3449,11 +3362,11 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.27.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2d56aadc2c2bf22cd7797f86e56a65b5b3994a0136b65be3106938acae7a26" +checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59" dependencies = [ - "asn1_der 0.6.3", + "asn1_der", "bs58", "ed25519-dalek", "either", @@ -3463,9 +3376,9 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", + "multiaddr", "multihash", - "multistream-select", - "parity-multiaddr", + "multistream-select 0.10.2", "parking_lot", "pin-project 1.0.7", "prost", @@ -3473,7 +3386,7 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.5", + "sha2", "smallvec", "thiserror", "unsigned-varint 0.7.0", @@ -3483,11 +3396,10 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.28.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "554d3e7e9e65f939d66b75fd6a4c67f258fe250da61b91f46c545fc4a89b51d9" +version = "0.29.0" +source = 
"git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ - "asn1_der 0.7.4", + "asn1_der", "bs58", "ed25519-dalek", "either", @@ -3497,9 +3409,9 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", + "multiaddr", "multihash", - "multistream-select", - "parity-multiaddr", + "multistream-select 0.10.3", "parking_lot", "pin-project 1.0.7", "prost", @@ -3507,7 +3419,7 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.5", + "sha2", "smallvec", "thiserror", "unsigned-varint 0.7.0", @@ -3515,22 +3427,50 @@ dependencies = [ "zeroize", ] +[[package]] +name = "libp2p-deflate" +version = "0.29.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "flate2", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", +] + [[package]] name = "libp2p-dns" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5153b6db68fd4baa3b304e377db744dd8fea8ff4e4504509ee636abcde88d3e3" +version = "0.29.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "async-std-resolver", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "log", + "smallvec", + "trust-dns-resolver", +] + +[[package]] +name = "libp2p-floodsub" +version = "0.30.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ + "cuckoofilter", + "fnv", "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-swarm", "log", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec", ] [[package]] name = "libp2p-gossipsub" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502dc5fcbfec4aa1c63ef3f7307ffe20e90c1a1387bf23ed0bec087f2dde58a1" +version = "0.32.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "asynchronous-codec", "base64 0.13.0", @@ -3539,14 +3479,14 @@ dependencies = [ "fnv", "futures", "hex_fmt", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", "libp2p-swarm", "log", "prost", "prost-build", "rand 0.7.3", "regex", - "sha2 0.9.5", + "sha2", "smallvec", "unsigned-varint 0.7.0", "wasm-timer", @@ -3554,30 +3494,73 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40fb36a059b7a8cce1514bd8b546fa612e006c9937caa7f5950cb20021fe91e" +version = "0.30.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "futures", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "smallvec", + "wasm-timer", +] + +[[package]] +name = "libp2p-kad" +version = "0.31.0" +source = 
"git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ + "arrayvec 0.5.2", + "asynchronous-codec", + "bytes 1.0.1", + "either", + "fnv", "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", "libp2p-swarm", "log", "prost", "prost-build", + "rand 0.7.3", + "sha2", "smallvec", + "uint 0.9.1", + "unsigned-varint 0.7.0", + "void", "wasm-timer", ] +[[package]] +name = "libp2p-mdns" +version = "0.31.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "async-io", + "data-encoding", + "dns-parser", + "futures", + "if-watch", + "lazy_static", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-swarm", + "log", + "rand 0.8.4", + "smallvec", + "socket2 0.4.0", + "void", +] + [[package]] name = "libp2p-mplex" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350ce8b3923594aedabd5d6e3f875d058435052a29c3f32df378bc70d10be464" +version = "0.29.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "asynchronous-codec", "bytes 1.0.1", "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", "log", "nohash-hasher", "parking_lot", @@ -3588,35 +3571,117 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aca322b52a0c5136142a7c3971446fb1e9964923a526c9cc6ef3b7c94e57778" +version = "0.32.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "bytes 1.0.1", "curve25519-dalek", "futures", "lazy_static", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", "log", "prost", "prost-build", - "rand 0.7.3", - "sha2 0.9.5", + "rand 0.8.4", + "sha2", "snow", "static_assertions", "x25519-dalek", "zeroize", ] +[[package]] +name = "libp2p-ping" +version = "0.30.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "futures", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-swarm", + "log", + "rand 0.7.3", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-plaintext" +version = "0.29.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "asynchronous-codec", + "bytes 1.0.1", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "log", + "prost", + "prost-build", + "unsigned-varint 0.7.0", + "void", +] + +[[package]] +name = "libp2p-pnet" +version = "0.21.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "futures", + "log", + "pin-project 1.0.7", + "rand 0.7.3", + "salsa20 0.8.0", + 
"sha3", +] + +[[package]] +name = "libp2p-relay" +version = "0.3.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "asynchronous-codec", + "bytes 1.0.1", + "futures", + "futures-timer", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-swarm", + "log", + "pin-project 1.0.7", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec", + "unsigned-varint 0.7.0", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-request-response" +version = "0.12.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "async-trait", + "bytes 1.0.1", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-swarm", + "log", + "lru", + "minicbor", + "rand 0.7.3", + "smallvec", + "unsigned-varint 0.7.0", + "wasm-timer", +] + [[package]] name = "libp2p-swarm" -version = "0.27.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7955b973e1fd2bd61ffd43ce261c1223f61f4aacd5bae362a924993f9a25fd98" +version = "0.30.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "either", "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", "log", "rand 0.7.3", "smallvec", @@ -3626,9 +3691,8 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c564ebaa36a64839f51eaddb0243aaaa29ce64affb56129193cc3248b72af273" +version = "0.24.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "quote", "syn", @@ -3636,9 +3700,8 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a5aef80e519a6cb8e2663605142f97baaaea1a252eecbf8756184765f7471b" +version = "0.29.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "async-io", "futures", @@ -3647,22 +3710,45 @@ dependencies = [ "if-watch", "ipnet", "libc", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", "log", - "socket2 0.3.19", + "socket2 0.4.0", "tokio 1.8.1", ] +[[package]] +name = "libp2p-uds" +version = "0.29.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "async-std", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "log", +] + +[[package]] +name = "libp2p-wasm-ext" +version = "0.29.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "futures", + "js-sys", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "parity-send-wrapper", + "wasm-bindgen", + 
"wasm-bindgen-futures", +] + [[package]] name = "libp2p-websocket" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b1c6a3431045da8b925ed83384e4c5163e14b990572307fca9c507435d4d22" +version = "0.30.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", "log", "quicksink", "rw-stream-sink", @@ -3673,12 +3759,11 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4819358c542a86ff95f6ae691efb4b94ddaf477079b01a686f5705b79bfc232a" +version = "0.33.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" dependencies = [ "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", "parking_lot", "thiserror", "yamux", @@ -3686,20 +3771,52 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.3.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" dependencies = [ "arrayref", - "crunchy", - "digest 0.8.1", + "base64 0.12.3", + "digest", "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.4.0", + "serde", + "sha2", "typenum", ] +[[package]] +name = "libsecp256k1-core" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee11012b293ea30093c129173cac4335513064094619f4639a25b310fd33c11" +dependencies = [ + "crunchy", + "digest", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32239626ffbb6a095b83b37a02ceb3672b2443a87a000a884fc3c4d16925c9c0" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76acb433e21d10f5f9892b1962c2856c58c7f39a9e4bd68ac82b9436a0ffd5b9" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libsqlite3-sys" version = "0.20.1" @@ -3853,6 +3970,15 @@ dependencies = [ "hashbrown 0.9.1", ] +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "lru_cache" version = "0.1.0" @@ -3891,6 +4017,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.0.1" @@ -3961,6 +4093,26 @@ dependencies = [ "unicase", ] +[[package]] +name = "minicbor" +version = "0.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "51aa5bb0ca22415daca596a227b507f880ad1b2318a87fa9325312a5d285ca0d" +dependencies = [ + "minicbor-derive", +] + +[[package]] +name = "minicbor-derive" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f2b9e8883d58e34b18facd16c4564a77ea50fce028ad3d0ee6753440e37acc8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "miniz_oxide" version = "0.4.4" @@ -4014,17 +4166,35 @@ dependencies = [ "tokio 1.8.1", ] +[[package]] +name = "multiaddr" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ee4ea82141951ac6379f964f71b20876d43712bea8faf6dd1a375e08a46499" +dependencies = [ + "arrayref", + "bs58", + "byteorder", + "data-encoding", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.0", + "url", +] + [[package]] name = "multihash" -version = "0.13.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" +checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8" dependencies = [ - "digest 0.9.0", - "generic-array 0.14.4", + "digest", + "generic-array", "multihash-derive", - "sha2 0.9.5", - "unsigned-varint 0.5.1", + "sha2", + "unsigned-varint 0.7.0", ] [[package]] @@ -4079,6 +4249,19 @@ dependencies = [ "unsigned-varint 0.7.0", ] +[[package]] +name = "multistream-select" +version = "0.10.3" +source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +dependencies = [ + "bytes 1.0.1", + "futures", + "log", + "pin-project 1.0.7", + "smallvec", + "unsigned-varint 0.7.0", +] + [[package]] name = "native-tls" version = "0.2.7" @@ -4102,7 +4285,6 @@ name = "network" version = "0.2.0" dependencies = [ "beacon_chain", - "discv5", "environment", "error-chain", "eth2_libp2p", @@ -4310,12 +4492,6 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.0" @@ -4370,6 +4546,7 @@ name = "operation_pool" version = "0.2.0" dependencies = [ "beacon_chain", + "derivative", "eth2_ssz", "eth2_ssz_derive", "int_to_bytes", @@ -4383,27 +4560,10 @@ dependencies = [ "serde_derive", "state_processing", "store", + "superstruct", "types", ] -[[package]] -name = "parity-multiaddr" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4" -dependencies = [ - "arrayref", - "bs58", - "byteorder", - "data-encoding", - "multihash", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.0", - "url", -] - [[package]] name = "parity-scale-codec" version = "1.3.7" @@ -4442,6 +4602,12 @@ dependencies = [ "syn", ] +[[package]] +name = "parity-send-wrapper" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" + [[package]] name = "parking" version = "2.0.0" @@ -4626,9 +4792,9 @@ dependencies = [ [[package]] name = 
"plotters-backend" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8be10f7485c8a323ea100b20d6052c27cf5968f08f8e3a56ee9f0cf38ebd3d" +checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" [[package]] name = "plotters-svg" @@ -4654,22 +4820,12 @@ dependencies = [ [[package]] name = "poly1305" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8" -dependencies = [ - "cpuid-bool", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.4.5" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +checksum = "4fe800695325da85083cd23b56826fccb2e2dc29b218e7811a6f33bc93f414be" dependencies = [ - "cpuid-bool", - "opaque-debug 0.3.0", + "cpufeatures", + "opaque-debug", "universal-hash", ] @@ -4681,7 +4837,7 @@ checksum = "e597450cbf209787f0e6de80bf3795c6b2356a380ee87837b545aded8dbc1823" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "opaque-debug 0.3.0", + "opaque-debug", "universal-hash", ] @@ -4807,9 +4963,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" +checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ "bytes 1.0.1", "prost-derive", @@ -4817,13 +4973,13 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" +checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" dependencies = [ "bytes 1.0.1", "heck", - "itertools 0.9.0", + "itertools 0.10.1", "log", "multimap", "petgraph", @@ -4835,12 +4991,12 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" +checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" dependencies = [ "anyhow", - "itertools 0.9.0", + "itertools 0.10.1", "proc-macro2", "quote", "syn", @@ -4848,9 +5004,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" +checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ "bytes 1.0.1", "prost", @@ -5275,7 +5431,17 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "winreg 0.7.0", +] + +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error", ] [[package]] @@ -5379,6 +5545,15 @@ dependencies = [ "semver 0.11.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.3", +] + 
[[package]] name = "rustls" version = "0.19.1" @@ -5434,6 +5609,15 @@ dependencies = [ "cipher 0.2.5", ] +[[package]] +name = "salsa20" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c7c5f10864beba947e1a1b43f3ef46c8cc58d1c2ae549fa471713e8ff60787a" +dependencies = [ + "cipher 0.3.0", +] + [[package]] name = "same-file" version = "1.0.6" @@ -5482,8 +5666,8 @@ checksum = "8da492dab03f925d977776a0b7233d7b934d6dc2b94faead48928e2e9bacedb9" dependencies = [ "hmac 0.10.1", "pbkdf2 0.6.0", - "salsa20", - "sha2 0.9.5", + "salsa20 0.7.2", + "sha2", ] [[package]] @@ -5555,6 +5739,12 @@ dependencies = [ "semver-parser 0.10.2", ] +[[package]] +name = "semver" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f3aac57ee7f3272d8395c6e4f502f434f0e289fcd62876f70daa008c20dcabe" + [[package]] name = "semver-parser" version = "0.7.0" @@ -5680,11 +5870,11 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16" dependencies = [ - "block-buffer 0.9.0", + "block-buffer", "cfg-if 1.0.0", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest", + "opaque-debug", ] [[package]] @@ -5693,29 +5883,17 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha2" version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" dependencies = [ - "block-buffer 0.9.0", + "block-buffer", "cfg-if 1.0.0", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest", + "opaque-debug", ] [[package]] @@ -5724,10 +5902,10 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", + "block-buffer", + "digest", "keccak", - "opaque-debug 0.3.0", + "opaque-debug", ] [[package]] @@ -5764,7 +5942,7 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" dependencies = [ - "digest 0.9.0", + "digest", "rand_core 0.6.3", ] @@ -5833,7 +6011,7 @@ dependencies = [ "serde", "serde_derive", "slog", - "sloggers 2.0.0", + "sloggers 2.0.1", "tempfile", "tree_hash", "tree_hash_derive", @@ -5974,9 +6152,9 @@ dependencies = [ [[package]] name = "sloggers" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92ee08a52260ed355f96069884bf8873f2439231f8754cbd545291d647ebbd75" +checksum = "7071b1119e436e93157c2e9e134138d9d8716dfe5e2f472500119bcbe4f45a4e" dependencies = [ "chrono", "libc", @@ -6028,19 +6206,19 @@ checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451" [[package]] name = "snow" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" 
+checksum = "6142f7c25e94f6fd25a32c3348ec230df9109b463f59c8c7acc4bd34936babb7" dependencies = [ - "aes-gcm 0.7.0", + "aes-gcm", "blake2", "chacha20poly1305", - "rand 0.7.3", - "rand_core 0.5.1", + "rand 0.8.4", + "rand_core 0.6.3", "ring", - "rustc_version 0.2.3", - "sha2 0.9.5", - "subtle 2.4.0", + "rustc_version 0.3.3", + "sha2", + "subtle", "x25519-dalek", ] @@ -6120,6 +6298,7 @@ dependencies = [ "integer-sqrt", "itertools 0.10.1", "lazy_static", + "lighthouse_metrics", "log", "merkle_proof", "rayon", @@ -6224,16 +6403,6 @@ dependencies = [ "types", ] -[[package]] -name = "stream-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" -dependencies = [ - "block-cipher", - "generic-array 0.14.4", -] - [[package]] name = "string_cache" version = "0.8.1" @@ -6281,15 +6450,9 @@ dependencies = [ [[package]] name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - -[[package]] -name = "subtle" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" @@ -6326,9 +6489,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" dependencies = [ "proc-macro2", "quote", @@ -6426,18 +6589,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6" +checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d" +checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" dependencies = [ "proc-macro2", "quote", @@ -6536,7 +6699,7 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.5", + "sha2", "thiserror", "unicode-normalization", "zeroize", @@ -6643,9 +6806,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" +checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" dependencies = [ "proc-macro2", "quote", @@ -6664,9 +6827,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066" +checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" dependencies = [ "futures-core", "pin-project-lite 0.2.7", @@ -6845,6 +7008,51 @@ dependencies = [ "syn", ] +[[package]] +name = "trust-dns-proto" +version = 
"0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +dependencies = [ + "async-trait", + "cfg-if 1.0.0", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "lazy_static", + "log", + "rand 0.8.4", + "smallvec", + "thiserror", + "tinyvec", + "tokio 1.8.1", + "url", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +dependencies = [ + "cfg-if 1.0.0", + "futures-util", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "parking_lot", + "resolv-conf", + "smallvec", + "thiserror", + "tokio 1.8.1", + "trust-dns-proto", +] + [[package]] name = "try-lock" version = "0.2.3" @@ -6996,9 +7204,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-width" @@ -7018,16 +7226,10 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] -[[package]] -name = "unsigned-varint" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" - [[package]] name = "unsigned-varint" version = "0.6.0" @@ -7046,6 +7248,8 @@ checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" dependencies = [ "asynchronous-codec", "bytes 1.0.1", + "futures-io", + "futures-util", ] [[package]] @@ -7132,6 +7336,7 @@ dependencies = [ "slog-async", "slog-term", "slot_clock", + "task_executor", "tempfile", "tokio 1.8.1", "tree_hash", @@ -7406,7 +7611,7 @@ dependencies = [ "base64 0.13.0", "bytes 1.0.1", "derive_more", - "ethabi 14.0.0", + "ethabi 14.1.0", "ethereum-types 0.11.0", "futures", "futures-timer", @@ -7528,6 +7733,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi", +] + [[package]] name = "winreg" version = "0.7.0" @@ -7580,15 +7794,15 @@ dependencies = [ [[package]] name = "yamux" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" +checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107" dependencies = [ "futures", "log", "nohash-hasher", "parking_lot", - "rand 0.7.3", + "rand 0.8.4", "static_assertions", ] diff --git a/Makefile b/Makefile index dd19a0f4a7d..db88ae2bb01 100644 --- a/Makefile +++ b/Makefile @@ -95,7 +95,7 @@ cargo-fmt: check-benches: cargo check --workspace --benches -# Typechecks consensus code *without* allowing deprecated legacy arithmetic +# Typechecks consensus code *without* allowing deprecated legacy arithmetic or metrics. 
check-consensus: cargo check --manifest-path=consensus/state_processing/Cargo.toml --no-default-features @@ -151,7 +151,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2021-0073 + cargo audit --ignore RUSTSEC-2021-0073 --ignore RUSTSEC-2021-0076 # Runs `cargo udeps` to check for unused dependencies udeps: diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 4986151c038..ceb8cb8abb9 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -27,7 +27,7 @@ eth2_wallet = { path = "../crypto/eth2_wallet" } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } rand = "0.7.3" validator_dir = { path = "../common/validator_dir" } -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } slashing_protection = { path = "../validator_client/slashing_protection" } diff --git a/account_manager/src/common.rs b/account_manager/src/common.rs index 0d76380b02d..ce42615e507 100644 --- a/account_manager/src/common.rs +++ b/account_manager/src/common.rs @@ -19,7 +19,7 @@ pub fn read_mnemonic_from_cli( .map_err(|e| format!("Unable to read {:?}: {:?}", path, e)) .and_then(|bytes| { let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into(); - let phrase = from_utf8(&bytes_no_newlines.as_ref()) + let phrase = from_utf8(bytes_no_newlines.as_ref()) .map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?; Mnemonic::from_phrase(phrase, Language::English).map_err(|e| { format!( diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index c8bac168eed..738cbf16f03 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -51,7 +51,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long(BEACON_SERVER_FLAG) .value_name("NETWORK_ADDRESS") .help("Address to a beacon node HTTP API") - .default_value(&DEFAULT_BEACON_NODE) + .default_value(DEFAULT_BEACON_NODE) .takes_value(true), ) .arg( diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index 902e26528f7..6ad60bb515a 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -87,8 +87,8 @@ pub fn cli_run( match matches.subcommand() { (IMPORT_CMD, Some(matches)) => { - let import_filename: PathBuf = clap_utils::parse_required(&matches, IMPORT_FILE_ARG)?; - let minify: bool = clap_utils::parse_required(&matches, MINIFY_FLAG)?; + let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?; + let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?; let import_file = File::open(&import_filename).map_err(|e| { format!( "Unable to open import file at {}: {:?}", @@ -199,8 +199,8 @@ pub fn cli_run( Ok(()) } (EXPORT_CMD, Some(matches)) => { - let export_filename: PathBuf = clap_utils::parse_required(&matches, EXPORT_FILE_ARG)?; - let minify: bool = clap_utils::parse_required(&matches, MINIFY_FLAG)?; + let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?; + let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?; if !slashing_protection_db_path.exists() { return Err(format!( diff --git 
a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 1390f35033c..9ebaeae5f1a 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -114,7 +114,7 @@ pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), Str Language::English, ); - let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic)?; + let wallet = create_wallet_from_mnemonic(matches, wallet_base_dir.as_path(), &mnemonic)?; if let Some(path) = mnemonic_output_path { create_with_600_perms(&path, mnemonic.phrase().as_bytes()) @@ -168,7 +168,7 @@ pub fn create_wallet_from_mnemonic( if !path.exists() { // To prevent users from accidentally supplying their password to the PASSWORD_FLAG and // create a file with that name, we require that the password has a .pass suffix. - if path.extension() != Some(&OsStr::new("pass")) { + if path.extension() != Some(OsStr::new("pass")) { return Err(format!( "Only creates a password file if that file ends in .pass: {:?}", path @@ -189,7 +189,7 @@ pub fn create_wallet_from_mnemonic( .create_wallet( wallet_name, wallet_type, - &mnemonic, + mnemonic, wallet_password.as_bytes(), ) .map_err(|e| format!("Unable to create wallet: {:?}", e))?; diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index ec4cbb2ada1..f107c3638cc 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -71,7 +71,7 @@ pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), Str let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?; - let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic) + let wallet = create_wallet_from_mnemonic(matches, wallet_base_dir.as_path(), &mnemonic) .map_err(|e| format!("Unable to create wallet: {:?}", e))?; println!("Your wallet has been successfully recovered."); diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f170ee86f51..2b6feec5aad 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -26,7 +26,7 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr slog-term = "2.6.0" slog-async = "2.5.0" ctrlc = { version = "3.1.6", features = ["termination"] } -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } exit-future = "0.2.0" dirs = "3.0.1" logging = { path = "../common/logging" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index a7cbc2061fb..21f6711960c 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -40,7 +40,7 @@ eth2_ssz_derive = "0.1.0" state_processing = { path = "../../consensus/state_processing" } tree_hash = "0.1.1" types = { path = "../../consensus/types" } -tokio = "1.1.0" +tokio = "1.7.1" eth1 = { path = "../eth1" } futures = "0.3.7" genesis = { path = "../genesis" } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index ee0e0289649..47a3c99834c 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -31,7 +31,7 @@ use crate::{ HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, - observed_attestations::ObserveOutcome, + observed_aggregates::ObserveOutcome, observed_attesters::Error as ObservedAttestersError, BeaconChain, 
BeaconChainError, BeaconChainTypes, }; @@ -454,7 +454,7 @@ impl<'a, T: BeaconChainTypes> PartiallyVerifiedAggregatedAttestation<'a, T> { match chain .observed_aggregators .read() - .validator_has_been_observed(attestation, aggregator_index as usize) + .validator_has_been_observed(attestation.data.target.epoch, aggregator_index as usize) { Ok(true) => Err(Error::AggregatorAlreadyKnown(aggregator_index)), Ok(false) => Ok(()), @@ -474,7 +474,7 @@ impl<'a, T: BeaconChainTypes> PartiallyVerifiedAggregatedAttestation<'a, T> { // // Attestations must be for a known block. If the block is unknown, we simply drop the // attestation and do not delay consideration for later. - let head_block = verify_head_block_is_known(chain, &attestation, None)?; + let head_block = verify_head_block_is_known(chain, attestation, None)?; // Check the attestation target root is consistent with the head root. // @@ -483,7 +483,7 @@ impl<'a, T: BeaconChainTypes> PartiallyVerifiedAggregatedAttestation<'a, T> { // // Whilst this attestation *technically* could be used to add value to a block, it is // invalid in the spirit of the protocol. Here we choose safety over profit. - verify_attestation_target_root::(&head_block, &attestation)?; + verify_attestation_target_root::(&head_block, attestation)?; // Ensure that the attestation has participants. if attestation.aggregation_bits.is_zero() { @@ -684,7 +684,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedAggregatedAttestation<'a, T> { if let ObserveOutcome::AlreadyKnown = chain .observed_attestations .write() - .observe_attestation(attestation, Some(attestation_root)) + .observe_item(attestation, Some(attestation_root)) .map_err(|e| Error::BeaconChainError(e.into()))? { return Err(Error::AttestationAlreadyKnown(attestation_root)); @@ -697,7 +697,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedAggregatedAttestation<'a, T> { if chain .observed_aggregators .write() - .observe_validator(&attestation, aggregator_index as usize) + .observe_validator(attestation.data.target.epoch, aggregator_index as usize) .map_err(BeaconChainError::from)? { return Err(Error::PriorAttestationKnown { @@ -802,7 +802,7 @@ impl<'a, T: BeaconChainTypes> PartiallyVerifiedUnaggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(chain, &attestation)?; + verify_propagation_slot_range(chain, attestation)?; // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. @@ -816,10 +816,10 @@ impl<'a, T: BeaconChainTypes> PartiallyVerifiedUnaggregatedAttestation<'a, T> { // // Enforce a maximum skip distance for unaggregated attestations. let head_block = - verify_head_block_is_known(chain, &attestation, chain.config.import_max_skip_slots)?; + verify_head_block_is_known(chain, attestation, chain.config.import_max_skip_slots)?; // Check the attestation target root is consistent with the head root. - verify_attestation_target_root::(&head_block, &attestation)?; + verify_attestation_target_root::(&head_block, attestation)?; Ok(()) } @@ -861,7 +861,7 @@ impl<'a, T: BeaconChainTypes> PartiallyVerifiedUnaggregatedAttestation<'a, T> { if chain .observed_attesters .read() - .validator_has_been_observed(&attestation, validator_index as usize) + .validator_has_been_observed(attestation.data.target.epoch, validator_index as usize) .map_err(BeaconChainError::from)? 
{ return Err(Error::PriorAttestationKnown { @@ -1056,7 +1056,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedUnaggregatedAttestation<'a, T> { if chain .observed_attesters .write() - .observe_validator(&attestation, validator_index as usize) + .observe_validator(attestation.data.target.epoch, validator_index as usize) .map_err(BeaconChainError::from)? { return Err(Error::PriorAttestationKnown { @@ -1234,15 +1234,13 @@ pub fn verify_attestation_signature( .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; let fork = chain - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(BeaconChainError::CanonicalHeadLockTimeout) - .map(|head| head.beacon_state.fork())?; + .spec + .fork_at_epoch(indexed_attestation.data.target.epoch); let signature_set = indexed_attestation_signature_set_from_pubkeys( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), &indexed_attestation.signature, - &indexed_attestation, + indexed_attestation, &fork, chain.genesis_validators_root, &chain.spec, @@ -1340,15 +1338,13 @@ pub fn verify_signed_aggregate_signatures( } let fork = chain - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(BeaconChainError::CanonicalHeadLockTimeout) - .map(|head| head.beacon_state.fork())?; + .spec + .fork_at_epoch(indexed_attestation.data.target.epoch); let signature_sets = vec![ signed_aggregate_selection_proof_signature_set( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), - &signed_aggregate, + signed_aggregate, &fork, chain.genesis_validators_root, &chain.spec, @@ -1356,7 +1352,7 @@ pub fn verify_signed_aggregate_signatures( .map_err(BeaconChainError::SignatureSetError)?, signed_aggregate_signature_set( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), - &signed_aggregate, + signed_aggregate, &fork, chain.genesis_validators_root, &chain.spec, @@ -1365,7 +1361,7 @@ pub fn verify_signed_aggregate_signatures( indexed_attestation_signature_set_from_pubkeys( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), &indexed_attestation.signature, - &indexed_attestation, + indexed_attestation, &fork, chain.genesis_validators_root, &chain.spec, @@ -1386,7 +1382,7 @@ fn obtain_indexed_attestation_and_committees_per_slot( attestation: &Attestation, ) -> Result<(IndexedAttestation, CommitteesPerSlot), Error> { map_attestation_committee(chain, attestation, |(committee, committees_per_slot)| { - get_indexed_attestation(committee.committee, &attestation) + get_indexed_attestation(committee.committee, attestation) .map(|attestation| (attestation, committees_per_slot)) .map_err(Error::Invalid) }) diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs new file mode 100644 index 00000000000..01662efc135 --- /dev/null +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -0,0 +1,377 @@ +//! This module provides the `AttesterCache`, a cache designed for reducing state-reads when +//! validators produce `AttestationData`. +//! +//! This cache is required *as well as* the `ShufflingCache` since the `ShufflingCache` does not +//! provide any information about the `state.current_justified_checkpoint`. It is not trivial to add +//! the justified checkpoint to the `ShufflingCache` since that cache is keyed by shuffling decision +//! root, which is not suitable for the justified checkpoint. Whilst we can know the shuffling for +//! epoch `n` during `n - 1`, we *cannot* know the justified checkpoint. Instead, we *must* perform +//! 
`per_epoch_processing` to transform the state from epoch `n - 1` to epoch `n` so that rewards +//! and penalties can be computed and the `state.current_justified_checkpoint` can be updated. + +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use parking_lot::RwLock; +use state_processing::state_advance::{partial_state_advance, Error as StateAdvanceError}; +use std::collections::HashMap; +use std::ops::Range; +use types::{ + beacon_state::{ + compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, + }, + BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, RelativeEpoch, + Slot, +}; + +type JustifiedCheckpoint = Checkpoint; +type CommitteeLength = usize; +type CommitteeIndex = u64; +type CacheHashMap = HashMap; + +/// The maximum number of `AttesterCacheValues` to be kept in memory. +/// +/// Each `AttesterCacheValues` is very small (~16 bytes) and the cache will generally be kept small +/// by pruning on finality. +/// +/// The value provided here is much larger than will be used during ideal network conditions, +/// however we make it large since the values are so small. +const MAX_CACHE_LEN: usize = 1_024; + +#[derive(Debug)] +pub enum Error { + BeaconState(BeaconStateError), + // Boxed to avoid an infinite-size recursion issue. + BeaconChain(Box), + MissingBeaconState(Hash256), + FailedToTransitionState(StateAdvanceError), + CannotAttestToFutureState { + state_slot: Slot, + request_slot: Slot, + }, + /// Indicates a cache inconsistency. + WrongEpoch { + request_epoch: Epoch, + epoch: Epoch, + }, + InvalidCommitteeIndex { + committee_index: u64, + }, + /// Indicates an inconsistency with the beacon state committees. + InverseRange { + range: Range, + }, +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconState(e) + } +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChain(Box::new(e)) + } +} + +/// Stores the minimal amount of data required to compute the committee length for any committee at any +/// slot in a given `epoch`. +struct CommitteeLengths { + /// The `epoch` to which the lengths pertain. + epoch: Epoch, + /// The length of the shuffling in `self.epoch`. + active_validator_indices_len: usize, +} + +impl CommitteeLengths { + /// Instantiate `Self` using `state.current_epoch()`. + fn new(state: &BeaconState, spec: &ChainSpec) -> Result { + let active_validator_indices_len = if let Ok(committee_cache) = + state.committee_cache(RelativeEpoch::Current) + { + committee_cache.active_validator_indices().len() + } else { + // Building the cache like this avoids taking a mutable reference to `BeaconState`. + let committee_cache = state.initialize_committee_cache(state.current_epoch(), spec)?; + committee_cache.active_validator_indices().len() + }; + + Ok(Self { + epoch: state.current_epoch(), + active_validator_indices_len, + }) + } + + /// Get the length of the committee at the given `slot` and `committee_index`. + fn get( + &self, + slot: Slot, + committee_index: CommitteeIndex, + spec: &ChainSpec, + ) -> Result { + let slots_per_epoch = T::slots_per_epoch(); + let request_epoch = slot.epoch(slots_per_epoch); + + // Sanity check. 
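+        //
+        // The lengths stored in `self` are only valid for `self.epoch`. E.g.
+        // with 32 slots per epoch, a request for slot 70 implies epoch 2 and
+        // must be rejected unless `self.epoch == 2`.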
+ if request_epoch != self.epoch { + return Err(Error::WrongEpoch { + request_epoch, + epoch: self.epoch, + }); + } + + let slots_per_epoch = slots_per_epoch as usize; + let committees_per_slot = + T::get_committee_count_per_slot(self.active_validator_indices_len, spec)?; + let index_in_epoch = compute_committee_index_in_epoch( + slot, + slots_per_epoch, + committees_per_slot, + committee_index as usize, + ); + let range = compute_committee_range_in_epoch( + epoch_committee_count(committees_per_slot, slots_per_epoch), + index_in_epoch, + self.active_validator_indices_len, + ) + .ok_or(Error::InvalidCommitteeIndex { committee_index })?; + + range + .end + .checked_sub(range.start) + .ok_or(Error::InverseRange { range }) + } +} + +/// Provides the following information for some epoch: +/// +/// - The `state.current_justified_checkpoint` value. +/// - The committee lengths for all indices and slots. +/// +/// These values are used during attestation production. +pub struct AttesterCacheValue { + current_justified_checkpoint: Checkpoint, + committee_lengths: CommitteeLengths, +} + +impl AttesterCacheValue { + /// Instantiate `Self` using `state.current_epoch()`. + pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { + let current_justified_checkpoint = state.current_justified_checkpoint(); + let committee_lengths = CommitteeLengths::new(state, spec)?; + Ok(Self { + current_justified_checkpoint, + committee_lengths, + }) + } + + /// Get the justified checkpoint and committee length for some `slot` and `committee_index`. + fn get( + &self, + slot: Slot, + committee_index: CommitteeIndex, + spec: &ChainSpec, + ) -> Result<(JustifiedCheckpoint, CommitteeLength), Error> { + self.committee_lengths + .get::(slot, committee_index, spec) + .map(|committee_length| (self.current_justified_checkpoint, committee_length)) + } +} + +/// The `AttesterCacheKey` is fundamentally the same thing as the proposer shuffling decision root, +/// however here we use it as an identity for both of the following values: +/// +/// 1. The `state.current_justified_checkpoint`. +/// 2. The attester shuffling. +/// +/// This struct relies upon the premise that the `state.current_justified_checkpoint` in epoch `n` +/// is determined by the root of the latest block in epoch `n - 1`. Notably, this is identical to +/// how the proposer shuffling is keyed in `BeaconProposerCache`. +/// +/// It is also safe, but not maximally efficient, to key the attester shuffling with the same +/// strategy. For better shuffling keying strategies, see the `ShufflingCache`. +#[derive(Eq, PartialEq, Hash, Clone, Copy)] +pub struct AttesterCacheKey { + /// The epoch from which the justified checkpoint should be observed. + /// + /// Attestations which use `self.epoch` as `target.epoch` should use this key. + epoch: Epoch, + /// The root of the block at the last slot of `self.epoch - 1`. + decision_root: Hash256, +} + +impl AttesterCacheKey { + /// Instantiate `Self` to key `state.current_epoch()`. + /// + /// The `latest_block_root` should be the latest block that has been applied to `state`. This + /// parameter is required since the state does not store the block root for any block with the + /// same slot as `state.slot()`. + /// + /// ## Errors + /// + /// May error if `epoch` is out of the range of `state.block_roots`. 
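+    ///
+    /// ## Example
+    ///
+    /// An illustrative sketch of the keying premise, assuming two hypothetical
+    /// epoch-`n` states `state_a` and `state_b` that descend from the same
+    /// last block of epoch `n - 1`:
+    ///
+    /// ```ignore
+    /// let key_a = AttesterCacheKey::new(epoch, &state_a, latest_block_root)?;
+    /// let key_b = AttesterCacheKey::new(epoch, &state_b, latest_block_root)?;
+    /// // Same epoch and same decision root, therefore the same cache entry.
+    /// assert!(key_a == key_b);
+    /// ```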
+    pub fn new<T: EthSpec>(
+        epoch: Epoch,
+        state: &BeaconState<T>,
+        latest_block_root: Hash256,
+    ) -> Result<Self, Error> {
+        let slots_per_epoch = T::slots_per_epoch();
+        let decision_slot = epoch.start_slot(slots_per_epoch).saturating_sub(1_u64);
+
+        let decision_root = if decision_slot.epoch(slots_per_epoch) == epoch {
+            // This scenario is only possible during the genesis epoch. In this scenario, all-zeros
+            // is used as an alias to the genesis block.
+            Hash256::zero()
+        } else if epoch > state.current_epoch() {
+            // If the requested epoch is higher than the current epoch, the latest block will always
+            // be the decision root.
+            latest_block_root
+        } else {
+            *state.get_block_root(decision_slot)?
+        };
+
+        Ok(Self {
+            epoch,
+            decision_root,
+        })
+    }
+}
+
+/// Provides a cache for the justified checkpoint and committee length when producing an
+/// attestation.
+///
+/// See the module-level documentation for more information.
+#[derive(Default)]
+pub struct AttesterCache {
+    cache: RwLock<CacheHashMap>,
+}
+
+impl AttesterCache {
+    /// Get the justified checkpoint and committee length for the `slot` and `committee_index` in
+    /// the state identified by the cache `key`.
+    pub fn get<T: EthSpec>(
+        &self,
+        key: &AttesterCacheKey,
+        slot: Slot,
+        committee_index: CommitteeIndex,
+        spec: &ChainSpec,
+    ) -> Result<Option<(JustifiedCheckpoint, CommitteeLength)>, Error> {
+        self.cache
+            .read()
+            .get(key)
+            .map(|cache_item| cache_item.get::<T>(slot, committee_index, spec))
+            .transpose()
+    }
+
+    /// Cache the `state.current_epoch()` values if they are not already present in the cache.
+    pub fn maybe_cache_state<T: EthSpec>(
+        &self,
+        state: &BeaconState<T>,
+        latest_block_root: Hash256,
+        spec: &ChainSpec,
+    ) -> Result<(), Error> {
+        let key = AttesterCacheKey::new(state.current_epoch(), state, latest_block_root)?;
+        let mut cache = self.cache.write();
+        if !cache.contains_key(&key) {
+            let cache_item = AttesterCacheValue::new(state, spec)?;
+            Self::insert_respecting_max_len(&mut cache, key, cache_item);
+        }
+        Ok(())
+    }
+
+    /// Read the state identified by `state_root` from the database, advance it to the required
+    /// slot, use it to prime the cache and return the values for the provided `slot` and
+    /// `committee_index`.
+    ///
+    /// ## Notes
+    ///
+    /// This function takes a write-lock on the internal cache. Prefer attempting a `Self::get` call
+    /// before running this function as `Self::get` only takes a read-lock and is therefore less
+    /// likely to create contention.
+    pub fn load_and_cache_state<T: BeaconChainTypes>(
+        &self,
+        state_root: Hash256,
+        key: AttesterCacheKey,
+        slot: Slot,
+        committee_index: CommitteeIndex,
+        chain: &BeaconChain<T>,
+    ) -> Result<(JustifiedCheckpoint, CommitteeLength), Error> {
+        let spec = &chain.spec;
+        let slots_per_epoch = T::EthSpec::slots_per_epoch();
+        let epoch = slot.epoch(slots_per_epoch);
+
+        // Take a write-lock on the cache before starting the state read.
+        //
+        // Whilst holding the write-lock during the state read will create contention, it prevents
+        // the scenario where multiple requests from separate threads cause duplicate state reads.
+        let mut cache = self.cache.write();
+
+        // Try the cache to see if someone has already primed it between the time the function was
+        // called and when the cache write-lock was obtained. This avoids performing duplicate state
+        // reads.
+        if let Some(value) = cache
+            .get(&key)
+            .map(|cache_item| cache_item.get::<T::EthSpec>(slot, committee_index, spec))
+            .transpose()?
+        {
+            return Ok(value);
+        }
+
+        let mut state: BeaconState<T::EthSpec> = chain
+            .get_state(&state_root, None)?
+ .ok_or(Error::MissingBeaconState(state_root))?; + + if state.slot() > slot { + // This indicates an internal inconsistency. + return Err(Error::CannotAttestToFutureState { + state_slot: state.slot(), + request_slot: slot, + }); + } else if state.current_epoch() < epoch { + // Only perform a "partial" state advance since we do not require the state roots to be + // accurate. + partial_state_advance( + &mut state, + Some(state_root), + epoch.start_slot(slots_per_epoch), + spec, + ) + .map_err(Error::FailedToTransitionState)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + } + + let cache_item = AttesterCacheValue::new(&state, spec)?; + let value = cache_item.get::(slot, committee_index, spec)?; + Self::insert_respecting_max_len(&mut cache, key, cache_item); + Ok(value) + } + + /// Insert a value to `cache`, ensuring it does not exceed the maximum length. + /// + /// If the cache is already full, the item with the lowest epoch will be removed. + fn insert_respecting_max_len( + cache: &mut CacheHashMap, + key: AttesterCacheKey, + value: AttesterCacheValue, + ) { + while cache.len() >= MAX_CACHE_LEN { + if let Some(oldest) = cache + .iter() + .map(|(key, _)| *key) + .min_by_key(|key| key.epoch) + { + cache.remove(&oldest); + } else { + break; + } + } + + cache.insert(key, value); + } + + /// Remove all entries where the `key.epoch` is lower than the given `epoch`. + /// + /// Generally, the provided `epoch` should be the finalized epoch. + pub fn prune_below(&self, epoch: Epoch) { + self.cache.write().retain(|target, _| target.epoch >= epoch); + } +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b4769c89526..0a13417e579 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,6 +4,7 @@ use crate::attestation_verification::{ FullyVerifiedUnaggregatedAttestation, PartiallyVerifiedAggregatedAttestation, PartiallyVerifiedUnaggregatedAttestation, SignatureVerifiedAttestation, }; +use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, @@ -16,15 +17,25 @@ use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; use crate::head_tracker::HeadTracker; use crate::migrate::BackgroundMigrator; -use crate::naive_aggregation_pool::{Error as NaiveAggregationError, NaiveAggregationPool}; -use crate::observed_attestations::{Error as AttestationObservationError, ObservedAttestations}; -use crate::observed_attesters::{ObservedAggregators, ObservedAttesters}; +use crate::naive_aggregation_pool::{ + AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, + SyncContributionAggregateMap, +}; +use crate::observed_aggregates::{ + Error as AttestationObservationError, ObservedAggregateAttestations, ObservedSyncContributions, +}; +use crate::observed_attesters::{ + ObservedAggregators, ObservedAttesters, ObservedSyncAggregators, ObservedSyncContributors, +}; use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use crate::persisted_fork_choice::PersistedForkChoice; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::SnapshotCache; +use 
crate::sync_committee_verification::{ + Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, +}; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::{ get_block_delay_ms, get_slot_delay_ms, timestamp_now, ValidatorMonitor, @@ -41,6 +52,7 @@ use itertools::process_results; use itertools::Itertools; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; +use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -223,14 +235,28 @@ pub struct BeaconChain { /// /// This pool accepts `Attestation` objects that only have one aggregation bit set and provides /// a method to get an aggregated `Attestation` for some `AttestationData`. - pub naive_aggregation_pool: RwLock>, + pub naive_aggregation_pool: RwLock>>, + /// A pool of `SyncCommitteeContribution` dedicated to the "naive aggregation strategy" defined in the eth2 + /// specs. + /// + /// This pool accepts `SyncCommitteeContribution` objects that only have one aggregation bit set and provides + /// a method to get an aggregated `SyncCommitteeContribution` for some `SyncCommitteeContributionData`. + pub naive_sync_aggregation_pool: + RwLock>>, /// Contains a store of attestations which have been observed by the beacon chain. - pub(crate) observed_attestations: RwLock>, + pub(crate) observed_attestations: RwLock>, + /// Contains a store of sync contributions which have been observed by the beacon chain. + pub(crate) observed_sync_contributions: RwLock>, /// Maintains a record of which validators have been seen to attest in recent epochs. pub(crate) observed_attesters: RwLock>, + /// Maintains a record of which validators have been seen sending sync messages in recent epochs. + pub(crate) observed_sync_contributors: RwLock>, /// Maintains a record of which validators have been seen to create `SignedAggregateAndProofs` /// in recent epochs. pub(crate) observed_aggregators: RwLock>, + /// Maintains a record of which validators have been seen to create `SignedContributionAndProofs` + /// in recent epochs. + pub(crate) observed_sync_aggregators: RwLock>, /// Maintains a record of which validators have proposed blocks for each slot. pub(crate) observed_block_producers: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. @@ -266,6 +292,8 @@ pub struct BeaconChain { pub beacon_proposer_cache: Mutex, /// Caches a map of `validator_index -> validator_pubkey`. pub(crate) validator_pubkey_cache: TimeoutRwLock>, + /// A cache used when producing attestations. + pub(crate) attester_cache: Arc, /// A list of any hard-coded forks that have been disabled. pub disabled_forks: Vec, /// Sender given to tasks, so that if they encounter a state in which execution cannot @@ -825,6 +853,80 @@ impl BeaconChain { }) } + /// Return the sync committee at `slot + 1` from the canonical chain. + /// + /// This is useful when dealing with sync committee messages, because messages are signed + /// and broadcast one slot prior to the slot of the sync committee (which is relevant at + /// sync committee period boundaries). + pub fn sync_committee_at_next_slot( + &self, + slot: Slot, + ) -> Result>, Error> { + let epoch = slot.safe_add(1)?.epoch(T::EthSpec::slots_per_epoch()); + self.sync_committee_at_epoch(epoch) + } + + /// Return the sync committee at `epoch` from the canonical chain. 
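+    ///
+    /// Tries the head state first and falls back to loading a (potentially
+    /// historical) state via `Self::state_for_sync_committee_period`.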
+ pub fn sync_committee_at_epoch( + &self, + epoch: Epoch, + ) -> Result>, Error> { + // Try to read a committee from the head. This will work most of the time, but will fail + // for faraway committees, or if there are skipped slots at the transition to Altair. + let spec = &self.spec; + let committee_from_head = + self.with_head( + |head| match head.beacon_state.get_built_sync_committee(epoch, spec) { + Ok(committee) => Ok(Some(committee.clone())), + Err(BeaconStateError::SyncCommitteeNotKnown { .. }) + | Err(BeaconStateError::IncorrectStateVariant) => Ok(None), + Err(e) => Err(Error::from(e)), + }, + )?; + + if let Some(committee) = committee_from_head { + Ok(committee) + } else { + // Slow path: load a state (or advance the head). + let sync_committee_period = epoch.sync_committee_period(spec)?; + let committee = self + .state_for_sync_committee_period(sync_committee_period)? + .get_built_sync_committee(epoch, spec)? + .clone(); + Ok(committee) + } + } + + /// Load a state suitable for determining the sync committee for the given period. + /// + /// Specifically, the state at the start of the *previous* sync committee period. + /// + /// This is sufficient for historical duties, and efficient in the case where the head + /// is lagging the current period and we need duties for the next period (because we only + /// have to transition the head to start of the current period). + /// + /// We also need to ensure that the load slot is after the Altair fork. + /// + /// **WARNING**: the state returned will have dummy state roots. It should only be used + /// for its sync committees (determining duties, etc). + pub fn state_for_sync_committee_period( + &self, + sync_committee_period: u64, + ) -> Result, Error> { + let altair_fork_epoch = self + .spec + .altair_fork_epoch + .ok_or(Error::AltairForkDisabled)?; + + let load_slot = std::cmp::max( + self.spec.epochs_per_sync_committee_period * sync_committee_period.saturating_sub(1), + altair_fork_epoch, + ) + .start_slot(T::EthSpec::slots_per_epoch()); + + self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots) + } + /// Returns info representing the head block and state. /// /// A summarized version of `Self::head` that involves less cloning. @@ -1120,44 +1222,174 @@ impl BeaconChain { /// validator that is in the committee for `slot` and `index` in the canonical chain. /// /// Always attests to the canonical chain. + /// + /// ## Errors + /// + /// May return an error if the `request_slot` is too far behind the head state. pub fn produce_unaggregated_attestation( &self, - slot: Slot, - index: CommitteeIndex, + request_slot: Slot, + request_index: CommitteeIndex, ) -> Result, Error> { - // Note: we're taking a lock on the head. The work involved here should be trivial enough - // that the lock should not be held for long. - let head = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; + let _total_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_SECONDS); - if slot >= head.beacon_block.slot() { - self.produce_unaggregated_attestation_for_block( - slot, - index, - head.beacon_block_root, - Cow::Borrowed(&head.beacon_state), - head.beacon_state_root(), - ) + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let request_epoch = request_slot.epoch(slots_per_epoch); + + /* + * Phase 1/2: + * + * Take a short-lived read-lock on the head and copy the necessary information from it. 
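+         * The copied values are the block root, state root and target checkpoint,
+         * plus the justified checkpoint and committee length whenever the head is
+         * already in the request epoch.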
+ * + * It is important that this first phase is as quick as possible; creating contention for + * the head-lock is not desirable. + */ + + let head_state_slot; + let beacon_block_root; + let beacon_state_root; + let target; + let current_epoch_attesting_info: Option<(Checkpoint, usize)>; + let attester_cache_key; + let head_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS); + if let Some(head) = self.canonical_head.try_read_for(HEAD_LOCK_TIMEOUT) { + let head_state = &head.beacon_state; + head_state_slot = head_state.slot(); + + // There is no value in producing an attestation to a block that is pre-finalization and + // it is likely to cause expensive and pointless reads to the freezer database. Exit + // early if this is the case. + let finalized_slot = head_state + .finalized_checkpoint() + .epoch + .start_slot(slots_per_epoch); + if request_slot < finalized_slot { + return Err(Error::AttestingToFinalizedSlot { + finalized_slot, + request_slot, + }); + } + + // This function will eventually fail when trying to access a slot which is + // out-of-bounds of `state.block_roots`. This explicit error is intended to provide a + // clearer message to the user than an ambiguous `SlotOutOfBounds` error. + let slots_per_historical_root = T::EthSpec::slots_per_historical_root() as u64; + let lowest_permissible_slot = + head_state.slot().saturating_sub(slots_per_historical_root); + if request_slot < lowest_permissible_slot { + return Err(Error::AttestingToAncientSlot { + lowest_permissible_slot, + request_slot, + }); + } + + if request_slot >= head_state.slot() { + // When attesting to the head slot or later, always use the head of the chain. + beacon_block_root = head.beacon_block_root; + beacon_state_root = head.beacon_state_root(); + } else { + // Permit attesting to slots *prior* to the current head. This is desirable when + // the VC and BN are out-of-sync due to time issues or overloading. + beacon_block_root = *head_state.get_block_root(request_slot)?; + beacon_state_root = *head_state.get_state_root(request_slot)?; + }; + + let target_slot = request_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let target_root = if head_state.slot() <= target_slot { + // If the state is earlier than the target slot then the target *must* be the head + // block root. + beacon_block_root + } else { + *head_state.get_block_root(target_slot)? + }; + target = Checkpoint { + epoch: request_epoch, + root: target_root, + }; + + current_epoch_attesting_info = if head_state.current_epoch() == request_epoch { + // When the head state is in the same epoch as the request, all the information + // required to attest is available on the head state. + Some(( + head_state.current_justified_checkpoint(), + head_state + .get_beacon_committee(request_slot, request_index)? + .committee + .len(), + )) + } else { + // If the head state is in a *different* epoch to the request, more work is required + // to determine the justified checkpoint and committee length. + None + }; + + // Determine the key for `self.attester_cache`, in case it is required later in this + // routine. + attester_cache_key = + AttesterCacheKey::new(request_epoch, head_state, beacon_block_root)?; } else { - // We disallow producing attestations *prior* to the current head since such an - // attestation would require loading a `BeaconState` from disk. Loading `BeaconState` - // from disk is very resource intensive and proposes a DoS risk from validator clients. 
- // - // Although we generally allow validator clients to do things that might harm us (i.e., - // we trust them), sometimes we need to protect the BN from accidental errors which - // could cause it significant harm. - // - // This case is particularity harmful since the HTTP API can effectively call this - // function an unlimited amount of times. If `n` validators all happen to call it at - // the same time, we're going to load `n` states (and tree hash caches) into memory all - // at once. With `n >= 10` we're looking at hundreds of MB or GBs of RAM. - Err(Error::AttestingPriorToHead { - head_slot: head.beacon_block.slot(), - request_slot: slot, - }) + return Err(Error::CanonicalHeadLockTimeout); } + drop(head_timer); + + /* + * Phase 2/2: + * + * If the justified checkpoint and committee length from the head are suitable for this + * attestation, use them. If not, try the attester cache. If the cache misses, load a state + * from disk and prime the cache with it. + */ + + let cache_timer = + metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_CACHE_INTERACTION_SECONDS); + let (justified_checkpoint, committee_len) = + if let Some((justified_checkpoint, committee_len)) = current_epoch_attesting_info { + // The head state is in the same epoch as the attestation, so there is no more + // required information. + (justified_checkpoint, committee_len) + } else if let Some(cached_values) = self.attester_cache.get::( + &attester_cache_key, + request_slot, + request_index, + &self.spec, + )? { + // The suitable values were already cached. Return them. + cached_values + } else { + debug!( + self.log, + "Attester cache miss"; + "beacon_block_root" => ?beacon_block_root, + "head_state_slot" => %head_state_slot, + "request_slot" => %request_slot, + ); + + // Neither the head state, nor the attester cache was able to produce the required + // information to attest in this epoch. So, load a `BeaconState` from disk and use + // it to fulfil the request (and prime the cache to avoid this next time). + let _cache_build_timer = + metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_CACHE_PRIME_SECONDS); + self.attester_cache.load_and_cache_state( + beacon_state_root, + attester_cache_key, + request_slot, + request_index, + self, + )? + }; + drop(cache_timer); + + Ok(Attestation { + aggregation_bits: BitList::with_capacity(committee_len)?, + data: AttestationData { + slot: request_slot, + index: request_index, + beacon_block_root, + source: justified_checkpoint, + target, + }, + signature: AggregateSignature::empty(), + }) } /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to @@ -1305,6 +1537,36 @@ impl BeaconChain { }) } + /// Accepts some `SyncCommitteeMessage` from the network and attempts to verify it, returning `Ok(_)` if + /// it is valid to be (re)broadcast on the gossip network. + pub fn verify_sync_committee_message_for_gossip( + &self, + sync_message: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + ) -> Result { + metrics::inc_counter(&metrics::SYNC_MESSAGE_PROCESSING_REQUESTS); + let _timer = metrics::start_timer(&metrics::SYNC_MESSAGE_GOSSIP_VERIFICATION_TIMES); + + VerifiedSyncCommitteeMessage::verify(sync_message, subnet_id, self).map(|v| { + metrics::inc_counter(&metrics::SYNC_MESSAGE_PROCESSING_SUCCESSES); + v + }) + } + + /// Accepts some `SignedContributionAndProof` from the network and attempts to verify it, + /// returning `Ok(_)` if it is valid to be (re)broadcast on the gossip network. 
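+    ///
+    /// A verified contribution is typically then handed to
+    /// `Self::add_contribution_to_block_inclusion_pool`. An illustrative
+    /// sketch, assuming `contribution` is a `SignedContributionAndProof`
+    /// received from gossip:
+    ///
+    /// ```ignore
+    /// let verified = chain.verify_sync_contribution_for_gossip(contribution)?;
+    /// chain.add_contribution_to_block_inclusion_pool(verified)?;
+    /// ```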
+ pub fn verify_sync_contribution_for_gossip( + &self, + sync_contribution: SignedContributionAndProof, + ) -> Result, SyncCommitteeError> { + metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_PROCESSING_REQUESTS); + let _timer = metrics::start_timer(&metrics::SYNC_CONTRIBUTION_GOSSIP_VERIFICATION_TIMES); + VerifiedSyncContribution::verify(sync_contribution, self).map(|v| { + metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_PROCESSING_SUCCESSES); + v + }) + } + /// Accepts some attestation-type object and attempts to verify it in the context of fork /// choice. If it is valid it is applied to `self.fork_choice`. /// @@ -1374,6 +1636,70 @@ impl BeaconChain { Ok(unaggregated_attestation) } + /// Accepts a `VerifiedSyncCommitteeMessage` and attempts to apply it to the "naive + /// aggregation pool". + /// + /// The naive aggregation pool is used by local validators to produce + /// `SignedContributionAndProof`. + /// + /// If the sync message is too old (low slot) to be included in the pool it is simply dropped + /// and no error is returned. + pub fn add_to_naive_sync_aggregation_pool( + &self, + verified_sync_committee_message: VerifiedSyncCommitteeMessage, + ) -> Result { + let sync_message = verified_sync_committee_message.sync_message(); + let positions_by_subnet_id: &HashMap> = + verified_sync_committee_message.subnet_positions(); + for (subnet_id, positions) in positions_by_subnet_id.iter() { + for position in positions { + let _timer = + metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_AGG_POOL); + let contribution = SyncCommitteeContribution::from_message( + sync_message, + subnet_id.into(), + *position, + )?; + + match self + .naive_sync_aggregation_pool + .write() + .insert(&contribution) + { + Ok(outcome) => trace!( + self.log, + "Stored unaggregated sync committee message"; + "outcome" => ?outcome, + "index" => sync_message.validator_index, + "slot" => sync_message.slot.as_u64(), + ), + Err(NaiveAggregationError::SlotTooLow { + slot, + lowest_permissible_slot, + }) => { + trace!( + self.log, + "Refused to store unaggregated sync committee message"; + "lowest_permissible_slot" => lowest_permissible_slot.as_u64(), + "slot" => slot.as_u64(), + ); + } + Err(e) => { + error!( + self.log, + "Failed to store unaggregated sync committee message"; + "error" => ?e, + "index" => sync_message.validator_index, + "slot" => sync_message.slot.as_u64(), + ); + return Err(Error::from(e).into()); + } + }; + } + } + Ok(verified_sync_committee_message) + } + /// Accepts a `FullyVerifiedAggregatedAttestation` and attempts to apply it to `self.op_pool`. /// /// The op pool is used by local block producers to pack blocks with operations. @@ -1403,6 +1729,26 @@ impl BeaconChain { Ok(signed_aggregate) } + /// Accepts a `VerifiedSyncContribution` and attempts to apply it to `self.op_pool`. + /// + /// The op pool is used by local block producers to pack blocks with operations. + pub fn add_contribution_to_block_inclusion_pool( + &self, + contribution: VerifiedSyncContribution, + ) -> Result<(), SyncCommitteeError> { + let _timer = metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_OP_POOL); + + // If there's no eth1 chain then it's impossible to produce blocks and therefore + // useless to put things in the op pool. + if self.eth1_chain.is_some() { + self.op_pool + .insert_sync_contribution(contribution.contribution()) + .map_err(Error::from)?; + } + + Ok(()) + } + /// Filter an attestation from the op pool for shuffling compatibility. 
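    /// An attestation is retained only if the shuffling at its target epoch is
    /// compatible with the canonical chain (see `Self::shuffling_is_compatible`).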
/// /// Use the provided `filter_cache` map to memoize results. @@ -1418,7 +1764,7 @@ impl BeaconChain { self.shuffling_is_compatible( &att.data.beacon_block_root, att.data.target.epoch, - &state, + state, ) }) } @@ -1845,6 +2191,7 @@ impl BeaconChain { let block_root = fully_verified_block.block_root; let mut state = fully_verified_block.state; let current_slot = self.slot()?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); let mut ops = fully_verified_block.confirmation_db_batch; let attestation_observation_timer = @@ -1853,11 +2200,7 @@ impl BeaconChain { // Iterate through the attestations in the block and register them as an "observed // attestation". This will stop us from propagating them on the gossip network. for a in signed_block.message().body().attestations() { - match self - .observed_attestations - .write() - .observe_attestation(a, None) - { + match self.observed_attestations.write().observe_item(a, None) { // If the observation was successful or if the slot for the attestation was too // low, continue. // @@ -1874,9 +2217,8 @@ impl BeaconChain { for attestation in signed_block.message().body().attestations() { let committee = state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = - get_indexed_attestation(&committee.committee, attestation) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + let indexed_attestation = get_indexed_attestation(committee.committee, attestation) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; slasher.accept_attestation(indexed_attestation); } } @@ -1912,6 +2254,17 @@ impl BeaconChain { } } + // Apply the state to the attester cache, only if it is from the previous epoch or later. + // + // In a perfect scenario there should be no need to add previous-epoch states to the cache. + // However, latency between the VC and the BN might cause the VC to produce attestations at + // a previous slot. + if state.current_epoch().saturating_add(1_u64) >= current_epoch { + self.attester_cache + .maybe_cache_state(&state, block_root, &self.spec) + .map_err(BeaconChainError::from)?; + } + let mut fork_choice = self.fork_choice.write(); // Do not import a block that doesn't descend from the finalized root. @@ -2762,6 +3115,9 @@ impl BeaconChain { self.head_tracker.clone(), )?; + self.attester_cache + .prune_below(new_finalized_checkpoint.epoch); + if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_finalized_subscribers() { event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { @@ -2945,7 +3301,7 @@ impl BeaconChain { metrics::stop_timer(committee_building_timer); - map_fn(&committee_cache, shuffling_decision_block) + map_fn(committee_cache, shuffling_decision_block) } } @@ -3122,6 +3478,28 @@ impl BeaconChain { let mut file = std::fs::File::create(file_name).unwrap(); self.dump_as_dot(&mut file); } + + /// Checks if attestations have been seen from the given `validator_index` at the + /// given `epoch`. + pub fn validator_seen_at_epoch(&self, validator_index: usize, epoch: Epoch) -> bool { + // It's necessary to assign these checks to intermediate variables to avoid a deadlock. 
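+        // Holding all three read-guards inside a single boolean expression
+        // would keep several locks alive at once; binding each result to its
+        // own variable drops each guard before the next lock is taken.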
+ // + // See: https://github.com/sigp/lighthouse/pull/2230#discussion_r620013993 + let attested = self + .observed_attesters + .read() + .index_seen_at_epoch(validator_index, epoch); + let aggregated = self + .observed_aggregators + .read() + .index_seen_at_epoch(validator_index, epoch); + let produced_block = self + .observed_block_producers + .read() + .index_seen_at_epoch(validator_index as u64, epoch); + + attested || aggregated || produced_block + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 6345aac27ae..34903aed5db 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -321,7 +321,6 @@ where .deconstruct() .0; - // FIXME(altair): could remove clone with by-value `balances` accessor self.justified_balances = self .store .get_state(&justified_block.state_root(), Some(justified_block.slot())) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a4ae722b9f0..151db27269e 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -58,21 +58,19 @@ use slot_clock::SlotClock; use ssz::Encode; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, - per_block_processing, - per_epoch_processing::EpochProcessingSummary, - per_slot_processing, + per_block_processing, per_slot_processing, state_advance::partial_state_advance, BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, }; use std::borrow::Cow; -use std::convert::TryFrom; use std::fs; use std::io::Write; use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp}; use tree_hash::TreeHash; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, - InconsistentFork, PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. @@ -280,6 +278,7 @@ impl From for BlockError { } /// Information about invalid blocks which might still be slashable despite being invalid. +#[allow(clippy::enum_variant_names)] pub enum BlockSlashInfo { /// The block is invalid, but its proposer signature wasn't checked. SignatureNotChecked(SignedBeaconBlockHeader, TErr), @@ -839,7 +838,7 @@ impl IntoFullyVerifiedBlock for SignedBeaconBlock &SignedBeaconBlock { - &self + self } } @@ -971,12 +970,19 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { }; if let Some(summary) = per_slot_processing(&mut state, Some(state_root), &chain.spec)? { - summaries.push(summary) + // Expose Prometheus metrics. + if let Err(e) = summary.observe_metrics() { + error!( + chain.log, + "Failed to observe epoch summary metrics"; + "src" => "block_verification", + "error" => ?e + ); + } + summaries.push(summary); } } - expose_participation_metrics(&summaries); - // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); @@ -990,7 +996,15 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // performing `per_slot_processing`. 
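                // For example, with `summaries.len() == 2` and a current epoch
                // of `n`, the two summaries correspond to epochs `n - 2` and
                // `n - 1` respectively.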
for (i, summary) in summaries.iter().enumerate() { let epoch = state.current_epoch() - Epoch::from(summaries.len() - i); - validator_monitor.process_validator_statuses(epoch, &summary.statuses); + if let Err(e) = + validator_monitor.process_validator_statuses(epoch, summary, &chain.spec) + { + error!( + chain.log, + "Failed to process validator statuses"; + "error" => ?e + ); + } } } } @@ -1191,7 +1205,7 @@ pub fn check_block_relevancy( // Do not process a block from a finalized slot. check_block_against_finalized_slot(block, chain)?; - let block_root = block_root.unwrap_or_else(|| get_block_root(&signed_block)); + let block_root = block_root.unwrap_or_else(|| get_block_root(signed_block)); // Check if the block is already known. We know it is post-finalization, so it is // sufficient to check the fork choice. @@ -1382,22 +1396,31 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>( state: &'a BeaconState, validator_pubkey_cache: &'a ValidatorPubkeyCache, spec: &'a ChainSpec, -) -> BlockSignatureVerifier<'a, T::EthSpec, impl Fn(usize) -> Option> + Clone> { - BlockSignatureVerifier::new( - state, - move |validator_index| { - // Disallow access to any validator pubkeys that are not in the current beacon - // state. - if validator_index < state.validators().len() { - validator_pubkey_cache - .get(validator_index) - .map(|pk| Cow::Borrowed(pk)) - } else { - None - } - }, - spec, - ) +) -> BlockSignatureVerifier< + 'a, + T::EthSpec, + impl Fn(usize) -> Option> + Clone, + impl Fn(&'a PublicKeyBytes) -> Option>, +> { + let get_pubkey = move |validator_index| { + // Disallow access to any validator pubkeys that are not in the current beacon state. + if validator_index < state.validators().len() { + validator_pubkey_cache + .get(validator_index) + .map(Cow::Borrowed) + } else { + None + } + }; + + let decompressor = move |pk_bytes| { + // Map compressed pubkey to validator index. + let validator_index = validator_pubkey_cache.get_index(pk_bytes)?; + // Map validator index to pubkey (respecting guard on unknown validators). + get_pubkey(validator_index) + }; + + BlockSignatureVerifier::new(state, get_pubkey, decompressor, spec) } /// Verify that `header` was signed with a valid signature from its proposer. @@ -1432,45 +1455,6 @@ fn verify_header_signature( } } -fn expose_participation_metrics(summaries: &[EpochProcessingSummary]) { - if !cfg!(feature = "participation_metrics") { - return; - } - - for summary in summaries { - let b = &summary.total_balances; - - metrics::maybe_set_float_gauge( - &metrics::PARTICIPATION_PREV_EPOCH_ATTESTER, - participation_ratio(b.previous_epoch_attesters(), b.previous_epoch()), - ); - - metrics::maybe_set_float_gauge( - &metrics::PARTICIPATION_PREV_EPOCH_TARGET_ATTESTER, - participation_ratio(b.previous_epoch_target_attesters(), b.previous_epoch()), - ); - - metrics::maybe_set_float_gauge( - &metrics::PARTICIPATION_PREV_EPOCH_HEAD_ATTESTER, - participation_ratio(b.previous_epoch_head_attesters(), b.previous_epoch()), - ); - } -} - -fn participation_ratio(section: u64, total: u64) -> Option { - // Reduce the precision to help ensure we fit inside a u32. 
- const PRECISION: u64 = 100_000_000; - - let section: f64 = u32::try_from(section / PRECISION).ok()?.into(); - let total: f64 = u32::try_from(total / PRECISION).ok()?.into(); - - if total > 0_f64 { - Some(section / total) - } else { - None - } -} - fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { if WRITE_BLOCK_PROCESSING_SSZ { let root = state.tree_hash_root(); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 518c83659f1..d18a6059641 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -251,6 +251,13 @@ where .get_item::>(&OP_POOL_DB_KEY) .map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))? .map(PersistedOperationPool::into_operation_pool) + .transpose() + .map_err(|e| { + format!( + "Error while creating the op pool from the persisted op pool: {:?}", + e + ) + })? .unwrap_or_else(OperationPool::new), ); @@ -506,12 +513,20 @@ where // TODO: allow for persisting and loading the pool from disk. naive_aggregation_pool: <_>::default(), // TODO: allow for persisting and loading the pool from disk. + naive_sync_aggregation_pool: <_>::default(), + // TODO: allow for persisting and loading the pool from disk. observed_attestations: <_>::default(), // TODO: allow for persisting and loading the pool from disk. + observed_sync_contributions: <_>::default(), + // TODO: allow for persisting and loading the pool from disk. observed_attesters: <_>::default(), // TODO: allow for persisting and loading the pool from disk. + observed_sync_contributors: <_>::default(), + // TODO: allow for persisting and loading the pool from disk. observed_aggregators: <_>::default(), // TODO: allow for persisting and loading the pool from disk. + observed_sync_aggregators: <_>::default(), + // TODO: allow for persisting and loading the pool from disk. observed_block_producers: <_>::default(), // TODO: allow for persisting and loading the pool from disk. observed_voluntary_exits: <_>::default(), @@ -532,6 +547,7 @@ where shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), beacon_proposer_cache: <_>::default(), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), + attester_cache: <_>::default(), disabled_forks: self.disabled_forks, shutdown_sender: self .shutdown_sender @@ -546,6 +562,16 @@ where .head() .map_err(|e| format!("Failed to get head: {:?}", e))?; + // Prime the attester cache with the head state. + beacon_chain + .attester_cache + .maybe_cache_state( + &head.beacon_state, + head.beacon_block_root, + &beacon_chain.spec, + ) + .map_err(|e| format!("Failed to prime attester cache: {:?}", e))?; + // Only perform the check if it was configured. 
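        // (i.e. only if a weak subjectivity checkpoint was supplied in the
        // chain config).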
if let Some(wss_checkpoint) = beacon_chain.config.weak_subjectivity_checkpoint { if let Err(e) = beacon_chain.verify_weak_subjectivity_checkpoint( @@ -640,7 +666,7 @@ fn genesis_block( genesis_state: &mut BeaconState, spec: &ChainSpec, ) -> Result, String> { - let mut genesis_block = BeaconBlock::empty(&spec); + let mut genesis_block = BeaconBlock::empty(spec); *genesis_block.state_root_mut() = genesis_state .update_tree_hash_cache() .map_err(|e| format!("Error hashing genesis state: {:?}", e))?; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d5cc5eda66f..f484b194549 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,9 +1,10 @@ +use crate::attester_cache::Error as AttesterCacheError; use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; use crate::eth1_chain::Error as Eth1ChainError; use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; -use crate::observed_attestations::Error as ObservedAttestationsError; +use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use futures::channel::mpsc::TrySendError; @@ -14,7 +15,7 @@ use state_processing::{ block_signature_verifier::Error as BlockSignatureVerifierError, per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, ExitValidationError, - ProposerSlashingValidationError, + ProposerSlashingValidationError, SyncCommitteeMessageValidationError, }, signature_sets::Error as SignatureSetError, state_advance::Error as StateAdvanceError, @@ -60,6 +61,7 @@ pub enum BeaconChainError { }, CannotAttestToFutureState, AttestationValidationError(AttestationValidationError), + SyncCommitteeMessageValidationError(SyncCommitteeMessageValidationError), ExitValidationError(ExitValidationError), ProposerSlashingValidationError(ProposerSlashingValidationError), AttesterSlashingValidationError(AttesterSlashingValidationError), @@ -90,6 +92,7 @@ pub enum BeaconChainError { ObservedAttestationsError(ObservedAttestationsError), ObservedAttestersError(ObservedAttestersError), ObservedBlockProducersError(ObservedBlockProducersError), + AttesterCacheError(AttesterCacheError), PruningError(PruningError), ArithError(ArithError), InvalidShufflingId { @@ -98,8 +101,12 @@ pub enum BeaconChainError { }, WeakSubjectivtyVerificationFailure, WeakSubjectivtyShutdownError(TrySendError), - AttestingPriorToHead { - head_slot: Slot, + AttestingToFinalizedSlot { + finalized_slot: Slot, + request_slot: Slot, + }, + AttestingToAncientSlot { + lowest_permissible_slot: Slot, request_slot: Slot, }, BadPreState { @@ -121,10 +128,12 @@ pub enum BeaconChainError { old_slot: Slot, new_slot: Slot, }, + AltairForkDisabled, } easy_from_to!(SlotProcessingError, BeaconChainError); easy_from_to!(AttestationValidationError, BeaconChainError); +easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError); easy_from_to!(ExitValidationError, BeaconChainError); easy_from_to!(ProposerSlashingValidationError, BeaconChainError); easy_from_to!(AttesterSlashingValidationError, BeaconChainError); @@ -134,6 +143,7 @@ easy_from_to!(NaiveAggregationError, BeaconChainError); easy_from_to!(ObservedAttestationsError, BeaconChainError); easy_from_to!(ObservedAttestersError, BeaconChainError); 
easy_from_to!(ObservedBlockProducersError, BeaconChainError); +easy_from_to!(AttesterCacheError, BeaconChainError); easy_from_to!(BlockSignatureVerifierError, BeaconChainError); easy_from_to!(PruningError, BeaconChainError); easy_from_to!(ArithError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index f8c12e3c870..aa6978b79f6 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -762,7 +762,7 @@ mod test { "test should not use dummy backend" ); - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); *state.eth1_deposit_index_mut() = 0; state.eth1_data_mut().deposit_count = 0; @@ -815,7 +815,7 @@ mod test { "cache should store all logs" ); - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); *state.eth1_deposit_index_mut() = 0; state.eth1_data_mut().deposit_count = 0; @@ -877,10 +877,10 @@ mod test { "test should not use dummy backend" ); - let state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); let a = eth1_chain - .eth1_data_for_block_production(&state, &spec) + .eth1_data_for_block_production(&state, spec) .expect("should produce default eth1 data vote"); assert_eq!( a, @@ -902,11 +902,11 @@ mod test { "test should not use dummy backend" ); - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); *state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10); let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block; - let voting_period_start = get_voting_period_start_seconds(&state, &spec); + let voting_period_start = get_voting_period_start_seconds(&state, spec); let start_eth1_block = voting_period_start - follow_distance_seconds * 2; let end_eth1_block = voting_period_start - follow_distance_seconds; @@ -926,7 +926,7 @@ mod test { }); let vote = eth1_chain - .eth1_data_for_block_production(&state, &spec) + .eth1_data_for_block_production(&state, spec) .expect("should produce default eth1 data vote"); assert_eq!( @@ -956,7 +956,7 @@ mod test { get_votes_to_consider( blocks.iter(), get_voting_period_start_seconds(&state, spec), - &spec, + spec, ), HashMap::new() ); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index eb15a699a92..973564c699d 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,5 +1,6 @@ #![recursion_limit = "128"] // For lazy-static pub mod attestation_verification; +mod attester_cache; mod beacon_chain; mod beacon_fork_choice_store; mod beacon_proposer_cache; @@ -14,7 +15,7 @@ mod head_tracker; mod metrics; pub mod migrate; mod naive_aggregation_pool; -mod observed_attestations; +mod observed_aggregates; mod observed_attesters; mod observed_block_producers; pub mod observed_operations; @@ -24,6 +25,7 @@ pub mod schema_change; mod shuffling_cache; mod snapshot_cache; pub mod state_advance_timer; +pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; pub mod validator_monitor; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 314d1c482f6..9791698ac9f 100644 --- 
a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,3 +1,5 @@ +use crate::observed_attesters::SlotSubcommitteeIndex; +use crate::types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use lazy_static::lazy_static; pub use lighthouse_metrics::*; @@ -144,10 +146,6 @@ lazy_static! { "beacon_attestation_processing_apply_to_agg_pool", "Time spent applying an attestation to the naive aggregation pool" ); - pub static ref ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK: Result = try_create_histogram( - "beacon_attestation_processing_agg_pool_maps_write_lock", - "Time spent waiting for the maps write lock when adding to the agg poll" - ); pub static ref ATTESTATION_PROCESSING_AGG_POOL_PRUNE: Result = try_create_histogram( "beacon_attestation_processing_agg_pool_prune", "Time spent for the agg pool to prune" @@ -212,18 +210,22 @@ lazy_static! { /* * Attestation Production */ - pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "beacon_attestation_production_requests_total", - "Count of all attestation production requests" - ); - pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "beacon_attestation_production_successes_total", - "Count of attestations processed without error" - ); - pub static ref ATTESTATION_PRODUCTION_TIMES: Result = try_create_histogram( + pub static ref ATTESTATION_PRODUCTION_SECONDS: Result = try_create_histogram( "beacon_attestation_production_seconds", "Full runtime of attestation production" ); + pub static ref ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS: Result = try_create_histogram( + "attestation_production_head_scrape_seconds", + "Time taken to read the head state" + ); + pub static ref ATTESTATION_PRODUCTION_CACHE_INTERACTION_SECONDS: Result = try_create_histogram( + "attestation_production_cache_interaction_seconds", + "Time spent interacting with the attester cache" + ); + pub static ref ATTESTATION_PRODUCTION_CACHE_PRIME_SECONDS: Result = try_create_histogram( + "attestation_production_cache_prime_seconds", + "Time spent loading a new state from the disk due to a cache miss" + ); } // Second lazy-static block is used to account for macro recursion limit. @@ -329,22 +331,9 @@ lazy_static! { try_create_int_gauge("beacon_op_pool_proposer_slashings_total", "Count of proposer slashings in the op pool"); pub static ref OP_POOL_NUM_VOLUNTARY_EXITS: Result = try_create_int_gauge("beacon_op_pool_voluntary_exits_total", "Count of voluntary exits in the op pool"); + pub static ref OP_POOL_NUM_SYNC_CONTRIBUTIONS: Result = + try_create_int_gauge("beacon_op_pool_sync_contributions_total", "Count of sync contributions in the op pool"); - /* - * Participation Metrics - */ - pub static ref PARTICIPATION_PREV_EPOCH_ATTESTER: Result = try_create_float_gauge( - "beacon_participation_prev_epoch_attester", - "Ratio of attesting balances to total balances" - ); - pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTER: Result = try_create_float_gauge( - "beacon_participation_prev_epoch_target_attester", - "Ratio of target-attesting balances to total balances" - ); - pub static ref PARTICIPATION_PREV_EPOCH_HEAD_ATTESTER: Result = try_create_float_gauge( - "beacon_participation_prev_epoch_head_attester", - "Ratio of head-attesting balances to total balances" - ); /* * Attestation Observation Metrics @@ -357,6 +346,18 @@ lazy_static! 
{ "beacon_attn_observation_epoch_aggregators", "Count of aggregators that have been seen by the beacon chain in the previous epoch" ); + + /* + * Sync Committee Observation Metrics + */ + pub static ref SYNC_COMM_OBSERVATION_PREV_SLOT_SIGNERS: Result = try_create_int_gauge( + "beacon_sync_comm_observation_slot_signers", + "Count of sync committee contributors that have been seen by the beacon chain in the previous slot" + ); + pub static ref SYNC_COMM_OBSERVATION_PREV_SLOT_AGGREGATORS: Result = try_create_int_gauge( + "beacon_sync_comm_observation_slot_aggregators", + "Count of sync committee aggregators that have been seen by the beacon chain in the previous slot" + ); } // Third lazy-static block is used to account for macro recursion limit. @@ -649,6 +650,93 @@ lazy_static! { ); } +// Fourth lazy-static block is used to account for macro recursion limit. +lazy_static! { + /* + * Sync Committee Message Verification + */ + pub static ref SYNC_MESSAGE_PROCESSING_REQUESTS: Result = try_create_int_counter( + "beacon_sync_committee_message_processing_requests_total", + "Count of all sync messages submitted for processing" + ); + pub static ref SYNC_MESSAGE_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "beacon_sync_committee_message_processing_successes_total", + "Number of sync messages verified for gossip" + ); + pub static ref SYNC_MESSAGE_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_sync_committee_message_gossip_verification_seconds", + "Full runtime of sync contribution gossip verification" + ); + + /* + * Sync Committee Contribution Verification + */ + pub static ref SYNC_CONTRIBUTION_PROCESSING_REQUESTS: Result = try_create_int_counter( + "beacon_sync_contribution_processing_requests_total", + "Count of all sync contributions submitted for processing" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "beacon_sync_contribution_processing_successes_total", + "Number of sync contributions verified for gossip" + ); + pub static ref SYNC_CONTRIBUTION_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_sync_contribution_gossip_verification_seconds", + "Full runtime of sync contribution gossip verification" + ); + + /* + * General Sync Committee Contribution Processing + */ + pub static ref SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_AGG_POOL: Result = try_create_histogram( + "beacon_sync_contribution_processing_apply_to_agg_pool", + "Time spent applying a sync contribution to the naive aggregation pool" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_PRUNE: Result = try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_prune", + "Time spent for the agg pool to prune" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_INSERT: Result = try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_insert", + "Time spent for the outer pool.insert() function of agg pool" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CORE_INSERT: Result = try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_core_insert", + "Time spent for the core map.insert() function of agg pool" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_AGGREGATION: Result = try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_aggregation", + "Time spent doing signature aggregation when adding to the agg poll" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CREATE_MAP: Result = try_create_histogram( + 
"beacon_sync_contribution_processing_agg_pool_create_map", + "Time spent for creating a map for a new slot" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_OP_POOL: Result = try_create_histogram( + "beacon_sync_contribution_processing_apply_to_op_pool", + "Time spent applying a sync contribution to the block inclusion pool" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_SETUP_TIMES: Result = try_create_histogram( + "beacon_sync_contribution_processing_signature_setup_seconds", + "Time spent on setting up for the signature verification of sync contribution processing" + ); + pub static ref SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_TIMES: Result = try_create_histogram( + "beacon_sync_contribution_processing_signature_seconds", + "Time spent on the signature verification of sync contribution processing" + ); + + /* + * General Sync Committee Contribution Processing + */ + pub static ref SYNC_MESSAGE_PROCESSING_SIGNATURE_SETUP_TIMES: Result = try_create_histogram( + "beacon_sync_committee_message_processing_signature_setup_seconds", + "Time spent on setting up for the signature verification of sync message processing" + ); + pub static ref SYNC_MESSAGE_PROCESSING_SIGNATURE_TIMES: Result = try_create_histogram( + "beacon_sync_committee_message_processing_signature_seconds", + "Time spent on the signature verification of sync message processing" + ); +} + /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, /// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { @@ -659,6 +747,7 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { if let Some(slot) = beacon_chain.slot_clock.now() { scrape_attestation_observation(slot, beacon_chain); + scrape_sync_committee_observation(slot, beacon_chain); } set_gauge_by_usize( @@ -677,6 +766,10 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { &OP_POOL_NUM_VOLUNTARY_EXITS, beacon_chain.op_pool.num_voluntary_exits(), ); + set_gauge_by_usize( + &OP_POOL_NUM_SYNC_CONTRIBUTIONS, + beacon_chain.op_pool.num_sync_contributions(), + ); beacon_chain .validator_monitor @@ -771,6 +864,34 @@ fn scrape_attestation_observation(slot_now: Slot, chain: &B } } +fn scrape_sync_committee_observation(slot_now: Slot, chain: &BeaconChain) { + let prev_slot = slot_now - 1; + + let contributors = chain.observed_sync_contributors.read(); + let mut contributor_sum = 0; + for i in 0..SYNC_COMMITTEE_SUBNET_COUNT { + if let Some(count) = + contributors.observed_validator_count(SlotSubcommitteeIndex::new(prev_slot, i)) + { + contributor_sum += count; + } + } + drop(contributors); + set_gauge_by_usize(&SYNC_COMM_OBSERVATION_PREV_SLOT_SIGNERS, contributor_sum); + + let sync_aggregators = chain.observed_sync_aggregators.read(); + let mut aggregator_sum = 0; + for i in 0..SYNC_COMMITTEE_SUBNET_COUNT { + if let Some(count) = + sync_aggregators.observed_validator_count(SlotSubcommitteeIndex::new(prev_slot, i)) + { + aggregator_sum += count; + } + } + drop(sync_aggregators); + set_gauge_by_usize(&SYNC_COMM_OBSERVATION_PREV_SLOT_AGGREGATORS, aggregator_sum); +} + fn set_gauge_by_slot(gauge: &Result, value: Slot) { set_gauge(gauge, value.as_u64() as i64); } diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index e303f8973b5..8d8dd19b504 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -1,13 
+1,18 @@ use crate::metrics; use std::collections::HashMap; use tree_hash::TreeHash; -use types::{Attestation, AttestationData, EthSpec, Hash256, Slot}; +use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use types::slot_data::SlotData; +use types::sync_committee_contribution::SyncContributionData; +use types::{Attestation, AttestationData, EthSpec, Hash256, Slot, SyncCommitteeContribution}; type AttestationDataRoot = Hash256; +type SyncDataRoot = Hash256; + /// The number of slots that will be stored in the pool. /// -/// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations -/// at slots less than `4` will be dropped and any future attestation with a slot less than `4` +/// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all items +/// at slots less than `4` will be dropped and any future item with a slot less than `4` /// will be refused. const SLOTS_RETAINED: usize = 3; @@ -16,51 +21,98 @@ const SLOTS_RETAINED: usize = 3; /// This is a DoS protection measure. const MAX_ATTESTATIONS_PER_SLOT: usize = 16_384; -/// Returned upon successfully inserting an attestation into the pool. +/// Returned upon successfully inserting an item into the pool. #[derive(Debug, PartialEq)] pub enum InsertOutcome { - /// The `attestation.data` had not been seen before and was added to the pool. - NewAttestationData { committee_index: usize }, - /// A validator signature for the given `attestation.data` was already known. No changes were + /// The item had not been seen before and was added to the pool. + NewItemInserted { committee_index: usize }, + /// A validator signature for the given item's `Data` was already known. No changes were /// made. SignatureAlreadyKnown { committee_index: usize }, - /// The `attestation.data` was known, but a signature for the given validator was not yet + /// The item's `Data` was known, but a signature for the given validator was not yet /// known. The signature was aggregated into the pool. SignatureAggregated { committee_index: usize }, } #[derive(Debug, PartialEq)] pub enum Error { - /// The given `attestation.data.slot` was too low to be stored. No changes were made. + /// The given `data.slot` was too low to be stored. No changes were made. SlotTooLow { slot: Slot, lowest_permissible_slot: Slot, }, - /// The given `attestation.aggregation_bits` field was empty. + /// The given `aggregation_bits` field was empty. NoAggregationBitsSet, - /// The given `attestation.aggregation_bits` field had more than one signature. The number of + /// The given `aggregation_bits` field had more than one signature. The number of /// signatures found is included. MoreThanOneAggregationBitSet(usize), - /// We have reached the maximum number of unique `AttestationData` that can be stored in a + /// We have reached the maximum number of unique items that can be stored in a /// slot. This is a DoS protection function. - ReachedMaxAttestationsPerSlot(usize), - /// The given `attestation.aggregation_bits` field had a different length to the one currently + ReachedMaxItemsPerSlot(usize), + /// The given `aggregation_bits` field had a different length to the one currently /// stored. This indicates a fairly serious error somewhere in the code that called this /// function. InconsistentBitfieldLengths, - /// The given `attestation` was for the incorrect slot. This is an internal error. - IncorrectSlot { expected: Slot, attestation: Slot }, + /// The given item was for the incorrect slot. This is an internal error. 
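Both stored message types flow through the `SlotData` bound imported above. Its definition lives in the `types` crate and is not part of this diff; judging from how it is called later in the pool (`item.get_slot()`), its shape is roughly the following sketch:

```rust
use types::Slot;

// Assumed shape of `types::slot_data::SlotData`: a single accessor exposing
// the item's slot is all the generic pool needs to sort, retrieve, and prune.
pub trait SlotData {
    fn get_slot(&self) -> Slot;
}
```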
+ IncorrectSlot { expected: Slot, actual: Slot }, +} + +/// Implemented for items in the `NaiveAggregationPool`. Requires that items implement `SlotData`, +/// which means they have an associated slot. This handles aggregation of items that are inserted. +pub trait AggregateMap { + /// `Key` should be a hash of `Data`. + type Key; + + /// The item stored in the map. + type Value: Clone + SlotData; + + /// The unique fields of `Value`, hashed to create `Key`. + type Data: SlotData; + + /// Create a new `AggregateMap` with capacity `initial_capacity`. + fn new(initial_capacity: usize) -> Self; + + /// Insert a `Value` into `Self`, returning a result. + fn insert(&mut self, value: &Self::Value) -> Result; + + /// Get a `Value` from `Self` based on `Data`. + fn get(&self, data: &Self::Data) -> Option; + + /// Get a reference to the inner `HashMap`. + fn get_map(&self) -> &HashMap; + + /// Get a `Value` from `Self` based on `Key`, which is a hash of `Data`. + fn get_by_root(&self, root: &Self::Key) -> Option<&Self::Value>; + + /// The number of items stored in `Self`. + fn len(&self) -> usize; + + /// Start a timer observing inserts. + fn start_insert_timer() -> Option; + + /// Start a timer observing the time it takes to create a new map for a new slot. + fn start_create_map_timer() -> Option; + + /// Start a timer observing the time it takes to prune the pool. + fn start_prune_timer() -> Option; + + /// The default capacity of `Self`. + fn default_capacity() -> usize; } /// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all /// `attestation` are from the same slot. -struct AggregatedAttestationMap { +pub struct AggregatedAttestationMap { map: HashMap>, } -impl AggregatedAttestationMap { +impl AggregateMap for AggregatedAttestationMap { + type Key = AttestationDataRoot; + type Value = Attestation; + type Data = AttestationData; + /// Create an empty collection with the given `initial_capacity`. - pub fn new(initial_capacity: usize) -> Self { + fn new(initial_capacity: usize) -> Self { Self { map: HashMap::with_capacity(initial_capacity), } @@ -69,7 +121,7 @@ impl AggregatedAttestationMap { /// Insert an attestation into `self`, aggregating it into the pool. /// /// The given attestation (`a`) must only have one signature. - pub fn insert(&mut self, a: &Attestation) -> Result { + fn insert(&mut self, a: &Self::Value) -> Result { let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT); let set_bits = a @@ -106,65 +158,190 @@ impl AggregatedAttestationMap { } } else { if self.map.len() >= MAX_ATTESTATIONS_PER_SLOT { - return Err(Error::ReachedMaxAttestationsPerSlot( - MAX_ATTESTATIONS_PER_SLOT, - )); + return Err(Error::ReachedMaxItemsPerSlot(MAX_ATTESTATIONS_PER_SLOT)); } self.map.insert(attestation_data_root, a.clone()); - Ok(InsertOutcome::NewAttestationData { committee_index }) + Ok(InsertOutcome::NewItemInserted { committee_index }) } } /// Returns an aggregated `Attestation` with the given `data`, if any. /// /// The given `a.data.slot` must match the slot that `self` was initialized with. - pub fn get(&self, data: &AttestationData) -> Option> { + fn get(&self, data: &Self::Data) -> Option { self.map.get(&data.tree_hash_root()).cloned() } + fn get_map(&self) -> &HashMap { + &self.map + } + /// Returns an aggregated `Attestation` with the given `root`, if any.
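Both `insert` implementations (the attestation one above and the sync-contribution one that follows) enforce the same precondition: an incoming message must carry exactly one aggregation bit, and the index of that bit becomes the reported `committee_index`. A standalone sketch of that check, using a plain bool slice instead of the SSZ bitfield types (the `Error` variants are the pool's own, defined above):

```rust
// Returns the index of the single set bit, or the appropriate pool error.
fn single_set_bit(bits: &[bool]) -> Result<usize, Error> {
    let set_bits: Vec<usize> = bits
        .iter()
        .enumerate()
        .filter(|(_i, bit)| **bit)
        .map(|(i, _bit)| i)
        .collect();

    match set_bits.as_slice() {
        [] => Err(Error::NoAggregationBitsSet),
        [index] => Ok(*index),
        many => Err(Error::MoreThanOneAggregationBitSet(many.len())),
    }
}
```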
- pub fn get_by_root(&self, root: &AttestationDataRoot) -> Option<&Attestation> { + fn get_by_root(&self, root: &Self::Key) -> Option<&Self::Value> { self.map.get(root) } - /// Iterate all attestations in `self`. - pub fn iter(&self) -> impl Iterator> { - self.map.iter().map(|(_key, attestation)| attestation) + fn len(&self) -> usize { + self.map.len() + } + + fn start_insert_timer() -> Option { + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_INSERT) + } + + fn start_create_map_timer() -> Option { + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP) + } + + fn start_prune_timer() -> Option { + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_PRUNE) + } + + /// Use the `TARGET_COMMITTEE_SIZE`. + /// + /// Note: hard-coded until `TARGET_COMMITTEE_SIZE` is available via `EthSpec`. + fn default_capacity() -> usize { + 128 + } +} + +/// A collection of `SyncCommitteeContribution`, keyed by their `SyncContributionData`. Enforces that all +/// contributions are from the same slot. +pub struct SyncContributionAggregateMap { + map: HashMap>, +} + +impl AggregateMap for SyncContributionAggregateMap { + type Key = SyncDataRoot; + type Value = SyncCommitteeContribution; + type Data = SyncContributionData; + + /// Create an empty collection with the given `initial_capacity`. + fn new(initial_capacity: usize) -> Self { + Self { + map: HashMap::with_capacity(initial_capacity), + } + } + + /// Insert a sync committee contribution into `self`, aggregating it into the pool. + /// + /// The given sync contribution must only have one signature. + fn insert( + &mut self, + contribution: &SyncCommitteeContribution, + ) -> Result { + let _timer = + metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CORE_INSERT); + + let set_bits = contribution + .aggregation_bits + .iter() + .enumerate() + .filter(|(_i, bit)| *bit) + .map(|(i, _bit)| i) + .collect::>(); + + let committee_index = set_bits + .first() + .copied() + .ok_or(Error::NoAggregationBitsSet)?; + + if set_bits.len() > 1 { + return Err(Error::MoreThanOneAggregationBitSet(set_bits.len())); + } + + let sync_data_root = SyncContributionData::from_contribution(contribution).tree_hash_root(); + + if let Some(existing_contribution) = self.map.get_mut(&sync_data_root) { + if existing_contribution + .aggregation_bits + .get(committee_index) + .map_err(|_| Error::InconsistentBitfieldLengths)? + { + Ok(InsertOutcome::SignatureAlreadyKnown { committee_index }) + } else { + let _timer = metrics::start_timer( + &metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_AGGREGATION, + ); + existing_contribution.aggregate(contribution); + Ok(InsertOutcome::SignatureAggregated { committee_index }) + } + } else { + if self.map.len() >= E::sync_committee_size() { + return Err(Error::ReachedMaxItemsPerSlot(E::sync_committee_size())); + } + + self.map.insert(sync_data_root, contribution.clone()); + Ok(InsertOutcome::NewItemInserted { committee_index }) + } } - pub fn len(&self) -> usize { + /// Returns an aggregated `SyncCommitteeContribution` with the given `data`, if any. + /// + /// The given `data.slot` must match the slot that `self` was initialized with. + fn get(&self, data: &SyncContributionData) -> Option> { + self.map.get(&data.tree_hash_root()).cloned() + } + + fn get_map(&self) -> &HashMap> { + &self.map + } + + /// Returns an aggregated `SyncCommitteeContribution` with the given `root`, if any. 
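With both `AggregateMap` implementations in place, the beacon chain can hold one pool per message type. The aliases below are illustrative only and do not appear in the diff; the diff itself instantiates the pool as `NaiveAggregationPool<$map_type>` in the test macro further down:

```rust
// Hypothetical aliases: the same generic pool, parameterised by the map type
// that knows how to key, aggregate, and size-limit each kind of message.
type AttestationAggregationPool<E> = NaiveAggregationPool<AggregatedAttestationMap<E>>;
type SyncContributionAggregationPool<E> = NaiveAggregationPool<SyncContributionAggregateMap<E>>;
```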
+ fn get_by_root(&self, root: &SyncDataRoot) -> Option<&SyncCommitteeContribution> { + self.map.get(root) + } + + fn len(&self) -> usize { self.map.len() } + + fn start_insert_timer() -> Option { + metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_INSERT) + } + + fn start_create_map_timer() -> Option { + metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CREATE_MAP) + } + + fn start_prune_timer() -> Option { + metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_PRUNE) + } + + /// Default to `SYNC_COMMITTEE_SUBNET_COUNT`. + fn default_capacity() -> usize { + SYNC_COMMITTEE_SUBNET_COUNT as usize + } } -/// A pool of `Attestation` that is specially designed to store "unaggregated" attestations from -/// the native aggregation scheme. +/// A pool of `Attestation` or `SyncCommitteeContribution` that is specially designed to store +/// "unaggregated" messages from the native aggregation scheme. /// -/// **The `NaiveAggregationPool` does not do any signature or attestation verification. It assumes -/// that all `Attestation` objects provided are valid.** +/// **The `NaiveAggregationPool` does not do any verification. It assumes that all `Attestation` +/// or `SyncCommitteeContribution` objects provided are valid.** /// /// ## Details /// -/// The pool sorts the `Attestation` by `attestation.data.slot`, then by `attestation.data`. +/// The pool sorts the items by `slot`, then by `Data`. /// -/// As each unaggregated attestation is added it is aggregated with any existing `attestation` with -/// the same `AttestationData`. Considering that the pool only accepts attestations with a single +/// As each item is added it is aggregated with any existing item with the same `Data`. Considering +/// that the pool only accepts attestations or sync contributions with a single /// signature, there should only ever be a single aggregated `Attestation` for any given -/// `AttestationData`. +/// `AttestationData` or a single `SyncCommitteeContribution` for any given `SyncContributionData`. /// -/// The pool has a capacity for `SLOTS_RETAINED` slots, when a new `attestation.data.slot` is +/// The pool has a capacity for `SLOTS_RETAINED` slots, when a new `slot` is /// provided, the oldest slot is dropped and replaced with the new slot. The pool can also be -/// pruned by supplying a `current_slot`; all existing attestations with a slot lower than -/// `current_slot - SLOTS_RETAINED` will be removed and any future attestation with a slot lower -/// than that will also be refused. Pruning is done automatically based upon the attestations it +/// pruned by supplying a `current_slot`; all existing items with a slot lower than +/// `current_slot - SLOTS_RETAINED` will be removed and any future item with a slot lower +/// than that will also be refused. Pruning is done automatically based upon the items it /// receives and it can be triggered manually. -pub struct NaiveAggregationPool { +pub struct NaiveAggregationPool { lowest_permissible_slot: Slot, - maps: HashMap>, + maps: HashMap, } -impl Default for NaiveAggregationPool { +impl Default for NaiveAggregationPool { fn default() -> Self { Self { lowest_permissible_slot: Slot::new(0), @@ -173,20 +350,20 @@ impl Default for NaiveAggregationPool { } } -impl NaiveAggregationPool { - /// Insert an attestation into `self`, aggregating it into the pool. +impl NaiveAggregationPool { + /// Insert an item into `self`, aggregating it into the pool. 
/// - /// The given attestation (`a`) must only have one signature and have an - /// `attestation.data.slot` that is not lower than `self.lowest_permissible_slot`. + /// The given item must only have one signature and have an + /// `slot` that is not lower than `self.lowest_permissible_slot`. /// - /// The pool may be pruned if the given `attestation.data` has a slot higher than any + /// The pool may be pruned if the given item has a slot higher than any /// previously seen. - pub fn insert(&mut self, attestation: &Attestation) -> Result { - let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_INSERT); - let slot = attestation.data.slot; + pub fn insert(&mut self, item: &T::Value) -> Result { + let _timer = T::start_insert_timer(); + let slot = item.get_slot(); let lowest_permissible_slot = self.lowest_permissible_slot; - // Reject any attestations that are too old. + // Reject any items that are too old. if slot < lowest_permissible_slot { return Err(Error::SlotTooLow { slot, @@ -194,14 +371,10 @@ impl NaiveAggregationPool { }); } - let lock_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK); - drop(lock_timer); - let outcome = if let Some(map) = self.maps.get_mut(&slot) { - map.insert(attestation) + map.insert(item) } else { - let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP); + let _timer = T::start_create_map_timer(); // To avoid re-allocations, try and determine a rough initial capacity for the new item // by obtaining the mean size of all items in earlier epoch. let (count, sum) = self @@ -213,12 +386,11 @@ impl NaiveAggregationPool { .map(|(_slot, map)| map.len()) .fold((0, 0), |(count, sum), len| (count + 1, sum + len)); - // Use the mainnet default committee size if we can't determine an average. - let initial_capacity = sum.checked_div(count).unwrap_or(128); + let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity); - let mut item = AggregatedAttestationMap::new(initial_capacity); - let outcome = item.insert(attestation); - self.maps.insert(slot, item); + let mut aggregate_map = T::new(initial_capacity); + let outcome = aggregate_map.insert(item); + self.maps.insert(slot, aggregate_map); outcome }; @@ -228,39 +400,39 @@ impl NaiveAggregationPool { outcome } - /// Returns the total number of attestations stored in `self`. - pub fn num_attestations(&self) -> usize { + /// Returns the total number of items stored in `self`. + pub fn num_items(&self) -> usize { self.maps.iter().map(|(_, map)| map.len()).sum() } - /// Returns an aggregated `Attestation` with the given `data`, if any. - pub fn get(&self, data: &AttestationData) -> Option> { - self.maps.get(&data.slot).and_then(|map| map.get(data)) + /// Returns an aggregated `T::Value` with the given `T::Data`, if any. + pub fn get(&self, data: &T::Data) -> Option { + self.maps + .get(&data.get_slot()) + .and_then(|map| map.get(data)) } - /// Returns an aggregated `Attestation` with the given `data`, if any. - pub fn get_by_slot_and_root( - &self, - slot: Slot, - root: &AttestationDataRoot, - ) -> Option> { + /// Returns an aggregated `T::Value` with the given `slot` and `root`, if any. + pub fn get_by_slot_and_root(&self, slot: Slot, root: &T::Key) -> Option { self.maps .get(&slot) .and_then(|map| map.get_by_root(root).cloned()) } - /// Iterate all attestations in all slots of `self`. 
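The capacity heuristic used by `insert` above, pulled out as a standalone sketch: the new slot's map is sized to the mean length of the maps already held for earlier slots, and the division fails over to the per-type default when there is no history yet.

```rust
// count == 0 makes `checked_div` return None, so an empty pool falls back to
// `T::default_capacity()` (128 for attestations, SYNC_COMMITTEE_SUBNET_COUNT
// for sync contributions).
fn initial_capacity(earlier_map_lens: &[usize], default_capacity: usize) -> usize {
    let (count, sum) = earlier_map_lens
        .iter()
        .fold((0usize, 0usize), |(count, sum), len| (count + 1, sum + len));
    sum.checked_div(count).unwrap_or(default_capacity)
}
```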
- pub fn iter(&self) -> impl Iterator> { - self.maps.iter().map(|(_slot, map)| map.iter()).flatten() + /// Iterate all items in all slots of `self`. + pub fn iter(&self) -> impl Iterator { + self.maps + .iter() + .map(|(_slot, map)| map.get_map().iter().map(|(_key, value)| value)) + .flatten() } - /// Removes any attestations with a slot lower than `current_slot` and bars any future - /// attestations with a slot lower than `current_slot - SLOTS_RETAINED`. + /// Removes any items with a slot lower than `current_slot` and bars any future + /// items with a slot lower than `current_slot - SLOTS_RETAINED`. pub fn prune(&mut self, current_slot: Slot) { - let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_PRUNE); + let _timer = T::start_prune_timer(); - // Taking advantage of saturating subtraction on `Slot`. - let lowest_permissible_slot = current_slot - Slot::from(SLOTS_RETAINED); + let lowest_permissible_slot = current_slot.saturating_sub(Slot::from(SLOTS_RETAINED)); // No need to prune if the lowest permissible slot has not changed and the queue length is // less than the maximum @@ -301,9 +473,10 @@ impl NaiveAggregationPool { mod tests { use super::*; use ssz_types::BitList; + use store::BitVector; use types::{ test_utils::{generate_deterministic_keypair, test_random_instance}, - Fork, Hash256, + Fork, Hash256, SyncCommitteeMessage, }; type E = types::MainnetEthSpec; @@ -315,7 +488,14 @@ mod tests { a } - fn sign(a: &mut Attestation, i: usize, genesis_validators_root: Hash256) { + fn get_sync_contribution(slot: Slot) -> SyncCommitteeContribution { + let mut a: SyncCommitteeContribution = test_random_instance(); + a.slot = slot; + a.aggregation_bits = BitVector::new(); + a + } + + fn sign_attestation(a: &mut Attestation, i: usize, genesis_validators_root: Hash256) { a.sign( &generate_deterministic_keypair(i).sk, i, @@ -326,187 +506,294 @@ mod tests { .expect("should sign attestation"); } - fn unset_bit(a: &mut Attestation, i: usize) { + fn sign_sync_contribution( + a: &mut SyncCommitteeContribution, + i: usize, + genesis_validators_root: Hash256, + ) { + let sync_message = SyncCommitteeMessage::new::( + a.slot, + a.beacon_block_root, + i as u64, + &generate_deterministic_keypair(i).sk, + &Fork::default(), + genesis_validators_root, + &E::default_spec(), + ); + let signed_contribution: SyncCommitteeContribution = + SyncCommitteeContribution::from_message(&sync_message, a.subcommittee_index, i) + .unwrap(); + + a.aggregate(&signed_contribution); + } + + fn unset_attestation_bit(a: &mut Attestation, i: usize) { a.aggregation_bits .set(i, false) .expect("should unset aggregation bit") } - #[test] - fn single_attestation() { - let mut a = get_attestation(Slot::new(0)); + fn unset_sync_contribution_bit(a: &mut SyncCommitteeContribution, i: usize) { + a.aggregation_bits + .set(i, false) + .expect("should unset aggregation bit") + } - let mut pool = NaiveAggregationPool::default(); + fn mutate_attestation_block_root(a: &mut Attestation, block_root: Hash256) { + a.data.beacon_block_root = block_root + } - assert_eq!( - pool.insert(&a), - Err(Error::NoAggregationBitsSet), - "should not accept attestation without any signatures" - ); + fn mutate_attestation_slot(a: &mut Attestation, slot: Slot) { + a.data.slot = slot + } - sign(&mut a, 0, Hash256::random()); + fn attestation_block_root_comparator(a: &Attestation, block_root: Hash256) -> bool { + a.data.beacon_block_root == block_root + } - assert_eq!( - pool.insert(&a), - Ok(InsertOutcome::NewAttestationData { 
committee_index: 0 }), - "should accept new attestation" - ); - assert_eq!( - pool.insert(&a), - Ok(InsertOutcome::SignatureAlreadyKnown { committee_index: 0 }), - "should acknowledge duplicate signature" - ); + fn key_from_attestation(a: &Attestation) -> AttestationData { + a.data.clone() + } - let retrieved = pool - .get(&a.data) - .expect("should not error while getting attestation"); - assert_eq!( - retrieved, a, - "retrieved attestation should equal the one inserted" - ); + fn mutate_sync_contribution_block_root( + a: &mut SyncCommitteeContribution, + block_root: Hash256, + ) { + a.beacon_block_root = block_root + } - sign(&mut a, 1, Hash256::random()); + fn mutate_sync_contribution_slot(a: &mut SyncCommitteeContribution, slot: Slot) { + a.slot = slot + } - assert_eq!( - pool.insert(&a), - Err(Error::MoreThanOneAggregationBitSet(2)), - "should not accept attestation with multiple signatures" - ); + fn sync_contribution_block_root_comparator( + a: &SyncCommitteeContribution, + block_root: Hash256, + ) -> bool { + a.beacon_block_root == block_root } - #[test] - fn multiple_attestations() { - let mut a_0 = get_attestation(Slot::new(0)); - let mut a_1 = a_0.clone(); + fn key_from_sync_contribution(a: &SyncCommitteeContribution) -> SyncContributionData { + SyncContributionData::from_contribution(a) + } - let genesis_validators_root = Hash256::random(); - sign(&mut a_0, 0, genesis_validators_root); - sign(&mut a_1, 1, genesis_validators_root); + macro_rules! test_suite { + ( + $mod_name: ident, + $get_method_name: ident, + $sign_method_name: ident, + $unset_method_name: ident, + $block_root_mutator: ident, + $slot_mutator: ident, + $block_root_comparator: ident, + $key_getter: ident, + $map_type: ident, + $item_limit: expr + ) => { + #[cfg(test)] + mod $mod_name { + use super::*; + + #[test] + fn single_item() { + let mut a = $get_method_name(Slot::new(0)); + + let mut pool: NaiveAggregationPool<$map_type> = + NaiveAggregationPool::default(); - let mut pool = NaiveAggregationPool::default(); + assert_eq!( + pool.insert(&a), + Err(Error::NoAggregationBitsSet), + "should not accept item without any signatures" + ); - assert_eq!( - pool.insert(&a_0), - Ok(InsertOutcome::NewAttestationData { committee_index: 0 }), - "should accept a_0" - ); - assert_eq!( - pool.insert(&a_1), - Ok(InsertOutcome::SignatureAggregated { committee_index: 1 }), - "should accept a_1" - ); + $sign_method_name(&mut a, 0, Hash256::random()); - let retrieved = pool - .get(&a_0.data) - .expect("should not error while getting attestation"); + assert_eq!( + pool.insert(&a), + Ok(InsertOutcome::NewItemInserted { committee_index: 0 }), + "should accept new item" + ); + assert_eq!( + pool.insert(&a), + Ok(InsertOutcome::SignatureAlreadyKnown { committee_index: 0 }), + "should acknowledge duplicate signature" + ); - let mut a_01 = a_0.clone(); - a_01.aggregate(&a_1); + let retrieved = pool + .get(&$key_getter(&a)) + .expect("should not error while getting item"); + assert_eq!(retrieved, a, "retrieved item should equal the one inserted"); - assert_eq!( - retrieved, a_01, - "retrieved attestation should be aggregated" - ); + $sign_method_name(&mut a, 1, Hash256::random()); - /* - * Throw a different attestation data in there and ensure it isn't aggregated - */ - - let mut a_different = a_0.clone(); - let different_root = Hash256::from_low_u64_be(1337); - unset_bit(&mut a_different, 0); - sign(&mut a_different, 2, genesis_validators_root); - assert_ne!(a_different.data.beacon_block_root, different_root); - 
a_different.data.beacon_block_root = different_root; - - assert_eq!( - pool.insert(&a_different), - Ok(InsertOutcome::NewAttestationData { committee_index: 2 }), - "should accept a_different" - ); + assert_eq!( + pool.insert(&a), + Err(Error::MoreThanOneAggregationBitSet(2)), + "should not accept item with multiple signatures" + ); + } - assert_eq!( - pool.get(&a_0.data) - .expect("should not error while getting attestation"), - retrieved, - "should not have aggregated different attestation data" - ); - } + #[test] + fn multiple_items() { + let mut a_0 = $get_method_name(Slot::new(0)); + let mut a_1 = a_0.clone(); + + let genesis_validators_root = Hash256::random(); + $sign_method_name(&mut a_0, 0, genesis_validators_root); + $sign_method_name(&mut a_1, 1, genesis_validators_root); + + let mut pool: NaiveAggregationPool<$map_type> = + NaiveAggregationPool::default(); - #[test] - fn auto_pruning() { - let mut base = get_attestation(Slot::new(0)); - sign(&mut base, 0, Hash256::random()); + assert_eq!( + pool.insert(&a_0), + Ok(InsertOutcome::NewItemInserted { committee_index: 0 }), + "should accept a_0" + ); + assert_eq!( + pool.insert(&a_1), + Ok(InsertOutcome::SignatureAggregated { committee_index: 1 }), + "should accept a_1" + ); - let mut pool = NaiveAggregationPool::default(); + let retrieved = pool + .get(&$key_getter(&a_0)) + .expect("should not error while getting attestation"); - for i in 0..SLOTS_RETAINED * 2 { - let slot = Slot::from(i); - let mut a = base.clone(); - a.data.slot = slot; + let mut a_01 = a_0.clone(); + a_01.aggregate(&a_1); - assert_eq!( - pool.insert(&a), - Ok(InsertOutcome::NewAttestationData { committee_index: 0 }), - "should accept new attestation" - ); + assert_eq!(retrieved, a_01, "retrieved item should be aggregated"); - if i < SLOTS_RETAINED { - let len = i + 1; - assert_eq!(pool.maps.len(), len, "the pool should have length {}", len); - } else { - assert_eq!( - pool.maps.len(), - SLOTS_RETAINED, - "the pool should have length SLOTS_RETAINED" - ); + /* + * Throw different data in there and ensure it isn't aggregated + */ - let mut pool_slots = pool - .maps - .iter() - .map(|(slot, _map)| *slot) - .collect::>(); + let mut a_different = a_0.clone(); + let different_root = Hash256::from_low_u64_be(1337); + $unset_method_name(&mut a_different, 0); + $sign_method_name(&mut a_different, 2, genesis_validators_root); + assert!(!$block_root_comparator(&a_different, different_root)); + $block_root_mutator(&mut a_different, different_root); - pool_slots.sort_unstable(); + assert_eq!( + pool.insert(&a_different), + Ok(InsertOutcome::NewItemInserted { committee_index: 2 }), + "should accept a_different" + ); - for (j, pool_slot) in pool_slots.iter().enumerate() { - let expected_slot = slot - (SLOTS_RETAINED - 1 - j) as u64; assert_eq!( - *pool_slot, expected_slot, - "the slot of the map should be {}", - expected_slot - ) + pool.get(&$key_getter(&a_0)) + .expect("should not error while getting item"), + retrieved, + "should not have aggregated different items with different data" + ); } - } - } - } - #[test] - fn max_attestations() { - let mut base = get_attestation(Slot::new(0)); - sign(&mut base, 0, Hash256::random()); + #[test] + fn auto_pruning_item() { + let mut base = $get_method_name(Slot::new(0)); + $sign_method_name(&mut base, 0, Hash256::random()); + + let mut pool: NaiveAggregationPool<$map_type> = + NaiveAggregationPool::default(); + + for i in 0..SLOTS_RETAINED * 2 { + let slot = Slot::from(i); + let mut a = base.clone(); + $slot_mutator(&mut a, slot); + + 
assert_eq!( + pool.insert(&a), + Ok(InsertOutcome::NewItemInserted { committee_index: 0 }), + "should accept new item" + ); + + if i < SLOTS_RETAINED { + let len = i + 1; + assert_eq!(pool.maps.len(), len, "the pool should have length {}", len); + } else { + assert_eq!( + pool.maps.len(), + SLOTS_RETAINED, + "the pool should have length SLOTS_RETAINED" + ); + + let mut pool_slots = pool + .maps + .iter() + .map(|(slot, _map)| *slot) + .collect::>(); + + pool_slots.sort_unstable(); + + for (j, pool_slot) in pool_slots.iter().enumerate() { + let expected_slot = slot - (SLOTS_RETAINED - 1 - j) as u64; + assert_eq!( + *pool_slot, expected_slot, + "the slot of the map should be {}", + expected_slot + ) + } + } + } + } - #[test] - fn max_attestations() { - let mut base = get_attestation(Slot::new(0)); - sign(&mut base, 0, Hash256::random()); + #[test] + fn max_items() { + let mut base = $get_method_name(Slot::new(0)); + $sign_method_name(&mut base, 0, Hash256::random()); + + let mut pool: NaiveAggregationPool<$map_type> = + NaiveAggregationPool::default(); + + for i in 0..=$item_limit { + let mut a = base.clone(); + $block_root_mutator(&mut a, Hash256::from_low_u64_be(i as u64)); + + if i < $item_limit { + assert_eq!( + pool.insert(&a), + Ok(InsertOutcome::NewItemInserted { committee_index: 0 }), + "should accept item below limit" + ); + } else { + assert_eq!( + pool.insert(&a), + Err(Error::ReachedMaxItemsPerSlot($item_limit)), + "should not accept item above limit" + ); + } + } + } + } + }; + } - let mut pool = NaiveAggregationPool::default(); + test_suite! { + attestation_tests, + get_attestation, + sign_attestation, + unset_attestation_bit, + mutate_attestation_block_root, + mutate_attestation_slot, + attestation_block_root_comparator, + key_from_attestation, + AggregatedAttestationMap, + MAX_ATTESTATIONS_PER_SLOT + } - for i in 0..=MAX_ATTESTATIONS_PER_SLOT { - let mut a = base.clone(); - a.data.beacon_block_root = Hash256::from_low_u64_be(i as u64); + test_suite! { + sync_contribution_tests, + get_sync_contribution, + sign_sync_contribution, + unset_sync_contribution_bit, + mutate_sync_contribution_block_root, + mutate_sync_contribution_slot, + sync_contribution_block_root_comparator, + key_from_sync_contribution, + SyncContributionAggregateMap, + E::sync_committee_size() } - if i < MAX_ATTESTATIONS_PER_SLOT { - assert_eq!( - pool.insert(&a), - Ok(InsertOutcome::NewAttestationData { committee_index: 0 }), - "should accept attestation below limit" - ); - } else { - assert_eq!( - pool.insert(&a), - Err(Error::ReachedMaxAttestationsPerSlot( - MAX_ATTESTATIONS_PER_SLOT - )), - "should not accept attestation above limit" - ); - } - } } diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs new file mode 100644 index 00000000000..c524bd682ab --- /dev/null +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -0,0 +1,507 @@ +//! Provides an `ObservedAggregates` struct which allows us to reject aggregated attestations or +//! sync committee contributions if we've already seen them. + +use std::collections::HashSet; +use std::marker::PhantomData; +use tree_hash::TreeHash; +use types::consts::altair::{ + SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, +}; +use types::slot_data::SlotData; +use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution}; + +pub type ObservedSyncContributions<E> = ObservedAggregates<SyncCommitteeContribution<E>, E>; +pub type ObservedAggregateAttestations<E> = ObservedAggregates<Attestation<E>, E>; + +/// A trait used to associate capacity constants with the type being stored in `ObservedAggregates`.
+pub trait Consts { + /// The default capacity of items stored per slot, in a single `SlotHashSet`. + const DEFAULT_PER_SLOT_CAPACITY: usize; + + /// The maximum number of slots + fn max_slot_capacity() -> usize; + + /// The maximum number of items stored per slot, in a single `SlotHashSet`. + fn max_per_slot_capacity() -> usize; +} + +impl Consts for Attestation { + /// Use 128 as it's the target committee size for the mainnet spec. This is perhaps a little + /// wasteful for the minimal spec, but considering it's approx. 128 * 32 bytes we're not wasting + /// much. + const DEFAULT_PER_SLOT_CAPACITY: usize = 128; + + /// We need to keep attestations for each slot of the current epoch. + fn max_slot_capacity() -> usize { + T::slots_per_epoch() as usize + } + + /// As a DoS protection measure, the maximum number of distinct `Attestations` or + /// `SyncCommitteeContributions` that will be recorded for each slot. + /// + /// Currently this is set to ~524k. If we say that each entry is 40 bytes (Hash256 (32 bytes) + an + /// 8 byte hash) then this comes to about 20mb per slot. If we're storing 34 of these slots, then + /// we're at 680mb. This is a lot of memory usage, but probably not a show-stopper for most + /// reasonable hardware. + /// + /// Upstream conditions should strongly restrict the amount of attestations that can show up in + /// this pool. The maximum size with respect to upstream restrictions is more likely on the order + /// of the number of validators. + fn max_per_slot_capacity() -> usize { + 1 << 19 // 524,288 + } +} + +impl Consts for SyncCommitteeContribution { + /// Set to `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE * SYNC_COMMITTEE_SUBNET_COUNT`. This is the + /// expected number of aggregators per slot across all subcommittees. + const DEFAULT_PER_SLOT_CAPACITY: usize = + (SYNC_COMMITTEE_SUBNET_COUNT * TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE) as usize; + + /// We only need to keep contributions related to the current slot. + fn max_slot_capacity() -> usize { + 1 + } + + /// We should never receive more aggregates than there are sync committee participants. + fn max_per_slot_capacity() -> usize { + T::sync_committee_size() + } +} + +#[derive(Debug, PartialEq)] +pub enum ObserveOutcome { + /// This item was already known. + AlreadyKnown, + /// This was the first time this item was observed. + New, +} + +#[derive(Debug, PartialEq)] +pub enum Error { + SlotTooLow { + slot: Slot, + lowest_permissible_slot: Slot, + }, + /// The function to obtain a set index failed, this is an internal error. + InvalidSetIndex(usize), + /// We have reached the maximum number of unique items that can be observed in a slot. + /// This is a DoS protection function. + ReachedMaxObservationsPerSlot(usize), + IncorrectSlot { + expected: Slot, + attestation: Slot, + }, +} + +/// A `HashSet` that contains entries related to some `Slot`. +struct SlotHashSet { + set: HashSet, + slot: Slot, + max_capacity: usize, +} + +impl SlotHashSet { + pub fn new(slot: Slot, initial_capacity: usize, max_capacity: usize) -> Self { + Self { + slot, + set: HashSet::with_capacity(initial_capacity), + max_capacity, + } + } + + /// Store the items in self so future observations recognise its existence. 
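The worst-case memory figures quoted in the `Attestation` impl above check out. As constants, using the comment's own byte estimates (illustrative arithmetic only):

```rust
const ENTRY_BYTES: usize = 40;        // Hash256 (32 bytes) + ~8 bytes of set overhead
const MAX_PER_SLOT: usize = 1 << 19;  // 524,288 distinct aggregates per slot
const SLOTS_KEPT: usize = 32 + 2;     // an epoch of slots plus clock-disparity slack

// 40 * 524_288 = 20_971_520 bytes (~20 MiB) per slot;
// 20_971_520 * 34 = 713_031_680 bytes = 680 MiB in the worst case.
const WORST_CASE_BYTES: usize = ENTRY_BYTES * MAX_PER_SLOT * SLOTS_KEPT;
```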
+ pub fn observe_item( + &mut self, + item: &T, + root: Hash256, + ) -> Result { + if item.get_slot() != self.slot { + return Err(Error::IncorrectSlot { + expected: self.slot, + attestation: item.get_slot(), + }); + } + + if self.set.contains(&root) { + Ok(ObserveOutcome::AlreadyKnown) + } else { + // Here we check to see if this slot has reached the maximum observation count. + // + // The resulting behaviour is that we are no longer able to successfully observe new + // items, however we will continue to return `is_known` values. We could also + // disable `is_known`, however then we would stop forwarding items across the + // gossip network and I think that this is a worse case than sending some invalid ones. + // The underlying libp2p network is responsible for removing duplicate messages, so + // this doesn't risk a broadcast loop. + if self.set.len() >= self.max_capacity { + return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); + } + + self.set.insert(root); + + Ok(ObserveOutcome::New) + } + } + + /// Indicates if `item` has been observed before. + pub fn is_known(&self, item: &T, root: Hash256) -> Result { + if item.get_slot() != self.slot { + return Err(Error::IncorrectSlot { + expected: self.slot, + attestation: item.get_slot(), + }); + } + + Ok(self.set.contains(&root)) + } + + /// The number of observed items in `self`. + pub fn len(&self) -> usize { + self.set.len() + } +} + +/// Stores the roots of objects for some number of `Slots`, so we can determine if +/// these have previously been seen on the network. +pub struct ObservedAggregates { + lowest_permissible_slot: Slot, + sets: Vec, + _phantom_spec: PhantomData, + _phantom_tree_hash: PhantomData, +} + +impl Default for ObservedAggregates { + fn default() -> Self { + Self { + lowest_permissible_slot: Slot::new(0), + sets: vec![], + _phantom_spec: PhantomData, + _phantom_tree_hash: PhantomData, + } + } +} + +impl ObservedAggregates { + /// Store the root of `item` in `self`. + /// + /// `root` must equal `item.tree_hash_root()`. + pub fn observe_item( + &mut self, + item: &T, + root_opt: Option, + ) -> Result { + let index = self.get_set_index(item.get_slot())?; + let root = root_opt.unwrap_or_else(|| item.tree_hash_root()); + + self.sets + .get_mut(index) + .ok_or(Error::InvalidSetIndex(index)) + .and_then(|set| set.observe_item(item, root)) + } + + /// Check to see if the `root` of `item` is in self. + /// + /// `root` must equal `a.tree_hash_root()`. + pub fn is_known(&mut self, item: &T, root: Hash256) -> Result { + let index = self.get_set_index(item.get_slot())?; + + self.sets + .get(index) + .ok_or(Error::InvalidSetIndex(index)) + .and_then(|set| set.is_known(item, root)) + } + + /// The maximum number of slots that items are stored for. + fn max_capacity(&self) -> u64 { + // We add `2` in order to account for one slot either side of the range due to + // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. + (T::max_slot_capacity() + 2) as u64 + } + + /// Removes any items with a slot lower than `current_slot` and bars any future + /// item with a slot lower than `current_slot - SLOTS_RETAINED`. + pub fn prune(&mut self, current_slot: Slot) { + let lowest_permissible_slot = current_slot.saturating_sub(self.max_capacity() - 1); + + self.sets.retain(|set| set.slot >= lowest_permissible_slot); + + self.lowest_permissible_slot = lowest_permissible_slot; + } + + /// Returns the index of `self.set` that matches `slot`. + /// + /// If there is no existing set for this slot one will be created. 
If `self.sets.len() >= + /// Self::max_capacity()`, the set with the lowest slot will be replaced. + fn get_set_index(&mut self, slot: Slot) -> Result { + let lowest_permissible_slot = self.lowest_permissible_slot; + + if slot < lowest_permissible_slot { + return Err(Error::SlotTooLow { + slot, + lowest_permissible_slot, + }); + } + + // Prune the pool if this item indicates that the current slot has advanced. + if lowest_permissible_slot + self.max_capacity() < slot + 1 { + self.prune(slot) + } + + if let Some(index) = self.sets.iter().position(|set| set.slot == slot) { + return Ok(index); + } + + // To avoid re-allocations, try and determine a rough initial capacity for the new set + // by obtaining the mean size of all items in earlier epoch. + let (count, sum) = self + .sets + .iter() + // Only include slots that are less than the given slot in the average. This should + // generally avoid including recent slots that are still "filling up". + .filter(|set| set.slot < slot) + .map(|set| set.len()) + .fold((0, 0), |(count, sum), len| (count + 1, sum + len)); + // If we are unable to determine an average, just use the `self.default_per_slot_capacity`. + let initial_capacity = sum + .checked_div(count) + .unwrap_or(T::DEFAULT_PER_SLOT_CAPACITY); + + if self.sets.len() < self.max_capacity() as usize || self.sets.is_empty() { + let index = self.sets.len(); + self.sets.push(SlotHashSet::new( + slot, + initial_capacity, + T::max_per_slot_capacity(), + )); + return Ok(index); + } + + let index = self + .sets + .iter() + .enumerate() + .min_by_key(|(_i, set)| set.slot) + .map(|(i, _set)| i) + .expect("sets cannot be empty due to previous .is_empty() check"); + + self.sets[index] = SlotHashSet::new(slot, initial_capacity, T::max_per_slot_capacity()); + + Ok(index) + } +} + +#[cfg(test)] +#[cfg(not(debug_assertions))] +mod tests { + use super::*; + use tree_hash::TreeHash; + use types::{test_utils::test_random_instance, Hash256}; + + type E = types::MainnetEthSpec; + + fn get_attestation(slot: Slot, beacon_block_root: u64) -> Attestation { + let mut a: Attestation = test_random_instance(); + a.data.slot = slot; + a.data.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root); + a + } + + fn get_sync_contribution(slot: Slot, beacon_block_root: u64) -> SyncCommitteeContribution { + let mut a: SyncCommitteeContribution = test_random_instance(); + a.slot = slot; + a.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root); + a + } + + macro_rules! 
test_suite { + ($mod_name: ident, $type: ident, $method_name: ident) => { + #[cfg(test)] + mod $mod_name { + use super::*; + + const NUM_ELEMENTS: usize = 8; + + fn single_slot_test(store: &mut $type, slot: Slot) { + let items = (0..NUM_ELEMENTS as u64) + .map(|i| $method_name(slot, i)) + .collect::>(); + + for a in &items { + assert_eq!( + store.is_known(a, a.tree_hash_root()), + Ok(false), + "should indicate an unknown attestation is unknown" + ); + assert_eq!( + store.observe_item(a, None), + Ok(ObserveOutcome::New), + "should observe new attestation" + ); + } + + for a in &items { + assert_eq!( + store.is_known(a, a.tree_hash_root()), + Ok(true), + "should indicate a known attestation is known" + ); + assert_eq!( + store.observe_item(a, Some(a.tree_hash_root())), + Ok(ObserveOutcome::AlreadyKnown), + "should acknowledge an existing attestation" + ); + } + } + + #[test] + fn single_slot() { + let mut store = $type::default(); + + single_slot_test(&mut store, Slot::new(0)); + + assert_eq!(store.sets.len(), 1, "should have a single set stored"); + assert_eq!( + store.sets[0].len(), + NUM_ELEMENTS, + "set should have NUM_ELEMENTS elements" + ); + } + + #[test] + fn mulitple_contiguous_slots() { + let mut store = $type::default(); + let max_cap = store.max_capacity(); + + for i in 0..max_cap * 3 { + let slot = Slot::new(i); + + single_slot_test(&mut store, slot); + + /* + * Ensure that the number of sets is correct. + */ + + if i < max_cap { + assert_eq!( + store.sets.len(), + i as usize + 1, + "should have a {} sets stored", + i + 1 + ); + } else { + assert_eq!( + store.sets.len(), + max_cap as usize, + "should have max_capacity sets stored" + ); + } + + /* + * Ensure that each set contains the correct number of elements. + */ + + for set in &store.sets[..] { + assert_eq!( + set.len(), + NUM_ELEMENTS, + "each store should have NUM_ELEMENTS elements" + ) + } + + /* + * Ensure that all the sets have the expected slots + */ + + let mut store_slots = + store.sets.iter().map(|set| set.slot).collect::>(); + + assert!( + store_slots.len() <= store.max_capacity() as usize, + "store size should not exceed max" + ); + + store_slots.sort_unstable(); + + let expected_slots = (i.saturating_sub(max_cap - 1)..=i) + .map(Slot::new) + .collect::>(); + + assert_eq!(expected_slots, store_slots, "should have expected slots"); + } + } + + #[test] + fn mulitple_non_contiguous_slots() { + let mut store = $type::default(); + let max_cap = store.max_capacity(); + + let to_skip = vec![1_u64, 2, 3, 5, 6, 29, 30, 31, 32, 64]; + let slots = (0..max_cap * 3) + .into_iter() + .filter(|i| !to_skip.contains(i)) + .collect::>(); + + for &i in &slots { + if to_skip.contains(&i) { + continue; + } + + let slot = Slot::from(i); + + single_slot_test(&mut store, slot); + + /* + * Ensure that each set contains the correct number of elements. + */ + + for set in &store.sets[..] 
{ + assert_eq!( + set.len(), + NUM_ELEMENTS, + "each store should have NUM_ELEMENTS elements" + ) + } + + /* + * Ensure that all the sets have the expected slots + */ + + let mut store_slots = + store.sets.iter().map(|set| set.slot).collect::>(); + + store_slots.sort_unstable(); + + assert!( + store_slots.len() <= store.max_capacity() as usize, + "store size should not exceed max" + ); + + let lowest = store.lowest_permissible_slot.as_u64(); + let highest = slot.as_u64(); + let expected_slots = (lowest..=highest) + .filter(|i| !to_skip.contains(i)) + .map(Slot::new) + .collect::>(); + + assert_eq!( + expected_slots, + &store_slots[..], + "should have expected slots" + ); + } + } + } + }; + } + test_suite!( + observed_sync_aggregates, + ObservedSyncContributions, + get_sync_contribution + ); + test_suite!( + observed_aggregate_attestations, + ObservedAggregateAttestations, + get_attestation + ); +} diff --git a/beacon_node/beacon_chain/src/observed_attestations.rs b/beacon_node/beacon_chain/src/observed_attestations.rs deleted file mode 100644 index 358a5034689..00000000000 --- a/beacon_node/beacon_chain/src/observed_attestations.rs +++ /dev/null @@ -1,424 +0,0 @@ -//! Provides an `ObservedAttestations` struct which allows us to reject aggregated attestations if -//! we've already seen the aggregated attestation. - -use std::collections::HashSet; -use std::marker::PhantomData; -use tree_hash::TreeHash; -use types::{Attestation, EthSpec, Hash256, Slot}; - -/// As a DoS protection measure, the maximum number of distinct `Attestations` that will be -/// recorded for each slot. -/// -/// Currently this is set to ~524k. If we say that each entry is 40 bytes (Hash256 (32 bytes) + an -/// 8 byte hash) then this comes to about 20mb per slot. If we're storing 34 of these slots, then -/// we're at 680mb. This is a lot of memory usage, but probably not a show-stopper for most -/// reasonable hardware. -/// -/// Upstream conditions should strongly restrict the amount of attestations that can show up in -/// this pool. The maximum size with respect to upstream restrictions is more likely on the order -/// of the number of validators. -const MAX_OBSERVATIONS_PER_SLOT: usize = 1 << 19; // 524,288 - -#[derive(Debug, PartialEq)] -pub enum ObserveOutcome { - /// This attestation was already known. - AlreadyKnown, - /// This was the first time this attestation was observed. - New, -} - -#[derive(Debug, PartialEq)] -pub enum Error { - SlotTooLow { - slot: Slot, - lowest_permissible_slot: Slot, - }, - /// The function to obtain a set index failed, this is an internal error. - InvalidSetIndex(usize), - /// We have reached the maximum number of unique `Attestation` that can be observed in a slot. - /// This is a DoS protection function. - ReachedMaxObservationsPerSlot(usize), - IncorrectSlot { - expected: Slot, - attestation: Slot, - }, -} - -/// A `HashSet` that contains entries related to some `Slot`. -struct SlotHashSet { - set: HashSet, - slot: Slot, -} - -impl SlotHashSet { - pub fn new(slot: Slot, initial_capacity: usize) -> Self { - Self { - slot, - set: HashSet::with_capacity(initial_capacity), - } - } - - /// Store the attestation in self so future observations recognise its existence. 
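Both the generic store above and the per-type `ObservedAttestations` implementation it replaces share one core pattern: a per-slot `HashSet` of tree-hash roots that answers "new" vs. "already known" and, once a DoS cap is reached, refuses further inserts while lookups keep working. A minimal std-only sketch of that pattern; the names and the tiny cap are illustrative, not from the codebase:

```rust
use std::collections::HashSet;

/// Hypothetical cap, kept tiny for the demo (the real constants are per-type).
const MAX_OBSERVATIONS_PER_SLOT: usize = 4;

#[derive(Debug, PartialEq)]
enum Outcome {
    New,
    AlreadyKnown,
}

#[derive(Debug, PartialEq)]
enum CapError {
    ReachedMaxObservations(usize),
}

struct SlotSet {
    set: HashSet<u64>, // stand-in for a `HashSet<Hash256>` of tree-hash roots
}

impl SlotSet {
    fn observe(&mut self, root: u64) -> Result<Outcome, CapError> {
        if self.set.contains(&root) {
            Ok(Outcome::AlreadyKnown)
        } else if self.set.len() >= MAX_OBSERVATIONS_PER_SLOT {
            // Stop accepting new roots, but keep answering lookups.
            Err(CapError::ReachedMaxObservations(MAX_OBSERVATIONS_PER_SLOT))
        } else {
            self.set.insert(root);
            Ok(Outcome::New)
        }
    }

    fn is_known(&self, root: u64) -> bool {
        self.set.contains(&root)
    }
}

fn main() {
    let mut s = SlotSet { set: HashSet::new() };
    assert_eq!(s.observe(1), Ok(Outcome::New));
    assert_eq!(s.observe(1), Ok(Outcome::AlreadyKnown));
    for root in 2..=4 {
        assert_eq!(s.observe(root), Ok(Outcome::New));
    }
    // At the cap: new roots are refused, known roots are still recognised.
    assert_eq!(s.observe(5), Err(CapError::ReachedMaxObservations(4)));
    assert!(s.is_known(1));
}
```

Keeping `is_known` functional at the cap means duplicates are still recognised rather than re-gossiped, the trade-off the original comment below spells out.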
- pub fn observe_attestation( - &mut self, - a: &Attestation, - root: Hash256, - ) -> Result { - if a.data.slot != self.slot { - return Err(Error::IncorrectSlot { - expected: self.slot, - attestation: a.data.slot, - }); - } - - if self.set.contains(&root) { - Ok(ObserveOutcome::AlreadyKnown) - } else { - // Here we check to see if this slot has reached the maximum observation count. - // - // The resulting behaviour is that we are no longer able to successfully observe new - // attestations, however we will continue to return `is_known` values. We could also - // disable `is_known`, however then we would stop forwarding attestations across the - // gossip network and I think that this is a worse case than sending some invalid ones. - // The underlying libp2p network is responsible for removing duplicate messages, so - // this doesn't risk a broadcast loop. - if self.set.len() >= MAX_OBSERVATIONS_PER_SLOT { - return Err(Error::ReachedMaxObservationsPerSlot( - MAX_OBSERVATIONS_PER_SLOT, - )); - } - - self.set.insert(root); - - Ok(ObserveOutcome::New) - } - } - - /// Indicates if `a` has been observed before. - pub fn is_known(&self, a: &Attestation, root: Hash256) -> Result { - if a.data.slot != self.slot { - return Err(Error::IncorrectSlot { - expected: self.slot, - attestation: a.data.slot, - }); - } - - Ok(self.set.contains(&root)) - } - - /// The number of observed attestations in `self`. - pub fn len(&self) -> usize { - self.set.len() - } -} - -/// Stores the roots of `Attestation` objects for some number of `Slots`, so we can determine if -/// these have previously been seen on the network. -pub struct ObservedAttestations { - lowest_permissible_slot: Slot, - sets: Vec, - _phantom: PhantomData, -} - -impl Default for ObservedAttestations { - fn default() -> Self { - Self { - lowest_permissible_slot: Slot::new(0), - sets: vec![], - _phantom: PhantomData, - } - } -} - -impl ObservedAttestations { - /// Store the root of `a` in `self`. - /// - /// `root` must equal `a.tree_hash_root()`. - pub fn observe_attestation( - &mut self, - a: &Attestation, - root_opt: Option, - ) -> Result { - let index = self.get_set_index(a.data.slot)?; - let root = root_opt.unwrap_or_else(|| a.tree_hash_root()); - - self.sets - .get_mut(index) - .ok_or(Error::InvalidSetIndex(index)) - .and_then(|set| set.observe_attestation(a, root)) - } - - /// Check to see if the `root` of `a` is in self. - /// - /// `root` must equal `a.tree_hash_root()`. - pub fn is_known(&mut self, a: &Attestation, root: Hash256) -> Result { - let index = self.get_set_index(a.data.slot)?; - - self.sets - .get(index) - .ok_or(Error::InvalidSetIndex(index)) - .and_then(|set| set.is_known(a, root)) - } - - /// The maximum number of slots that attestations are stored for. - fn max_capacity(&self) -> u64 { - // We add `2` in order to account for one slot either side of the range due to - // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. - E::slots_per_epoch() + 2 - } - - /// Removes any attestations with a slot lower than `current_slot` and bars any future - /// attestations with a slot lower than `current_slot - SLOTS_RETAINED`. - pub fn prune(&mut self, current_slot: Slot) { - // Taking advantage of saturating subtraction on `Slot`. - let lowest_permissible_slot = current_slot - (self.max_capacity() - 1); - - self.sets.retain(|set| set.slot >= lowest_permissible_slot); - - self.lowest_permissible_slot = lowest_permissible_slot; - } - - /// Returns the index of `self.set` that matches `slot`. 
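The retention arithmetic is shared by the old and new stores: with `max_capacity() == slots_per_epoch + 2`, an observation at `current_slot` keeps exactly `max_capacity` slots, namely `[current_slot - (max_capacity - 1), current_slot]`. A sketch with plain `u64` standing in for `Slot` (which also subtracts saturatingly):

```rust
/// Plain-`u64` stand-in for the `Slot` pruning bound.
fn lowest_permissible(current_slot: u64, max_capacity: u64) -> u64 {
    current_slot.saturating_sub(max_capacity - 1)
}

fn main() {
    let slots_per_epoch = 32u64;
    let max_capacity = slots_per_epoch + 2; // one slot either side for clock disparity

    // Near genesis the bound saturates to zero instead of underflowing.
    assert_eq!(lowest_permissible(0, max_capacity), 0);

    // At slot 100 the retained window is [67, 100]: exactly `max_capacity` slots.
    assert_eq!(lowest_permissible(100, max_capacity), 67);
    assert_eq!(100 - 67 + 1, max_capacity);
}
```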
- /// - /// If there is no existing set for this slot one will be created. If `self.sets.len() >= - /// Self::max_capacity()`, the set with the lowest slot will be replaced. - fn get_set_index(&mut self, slot: Slot) -> Result { - let lowest_permissible_slot = self.lowest_permissible_slot; - - if slot < lowest_permissible_slot { - return Err(Error::SlotTooLow { - slot, - lowest_permissible_slot, - }); - } - - // Prune the pool if this attestation indicates that the current slot has advanced. - if lowest_permissible_slot + self.max_capacity() < slot + 1 { - self.prune(slot) - } - - if let Some(index) = self.sets.iter().position(|set| set.slot == slot) { - return Ok(index); - } - - // To avoid re-allocations, try and determine a rough initial capacity for the new set - // by obtaining the mean size of all items in earlier epoch. - let (count, sum) = self - .sets - .iter() - // Only include slots that are less than the given slot in the average. This should - // generally avoid including recent slots that are still "filling up". - .filter(|set| set.slot < slot) - .map(|set| set.len()) - .fold((0, 0), |(count, sum), len| (count + 1, sum + len)); - // If we are unable to determine an average, just use 128 as it's the target committee - // size for the mainnet spec. This is perhaps a little wasteful for the minimal spec, - // but considering it's approx. 128 * 32 bytes we're not wasting much. - let initial_capacity = sum.checked_div(count).unwrap_or(128); - - if self.sets.len() < self.max_capacity() as usize || self.sets.is_empty() { - let index = self.sets.len(); - self.sets.push(SlotHashSet::new(slot, initial_capacity)); - return Ok(index); - } - - let index = self - .sets - .iter() - .enumerate() - .min_by_key(|(_i, set)| set.slot) - .map(|(i, _set)| i) - .expect("sets cannot be empty due to previous .is_empty() check"); - - self.sets[index] = SlotHashSet::new(slot, initial_capacity); - - Ok(index) - } -} - -#[cfg(test)] -#[cfg(not(debug_assertions))] -mod tests { - use super::*; - use tree_hash::TreeHash; - use types::{test_utils::test_random_instance, Hash256}; - - type E = types::MainnetEthSpec; - - const NUM_ELEMENTS: usize = 8; - - fn get_attestation(slot: Slot, beacon_block_root: u64) -> Attestation { - let mut a: Attestation = test_random_instance(); - a.data.slot = slot; - a.data.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root); - a - } - - fn single_slot_test(store: &mut ObservedAttestations, slot: Slot) { - let attestations = (0..NUM_ELEMENTS as u64) - .map(|i| get_attestation(slot, i)) - .collect::>(); - - for a in &attestations { - assert_eq!( - store.is_known(a, a.tree_hash_root()), - Ok(false), - "should indicate an unknown attestation is unknown" - ); - assert_eq!( - store.observe_attestation(a, None), - Ok(ObserveOutcome::New), - "should observe new attestation" - ); - } - - for a in &attestations { - assert_eq!( - store.is_known(a, a.tree_hash_root()), - Ok(true), - "should indicate a known attestation is known" - ); - assert_eq!( - store.observe_attestation(a, Some(a.tree_hash_root())), - Ok(ObserveOutcome::AlreadyKnown), - "should acknowledge an existing attestation" - ); - } - } - - #[test] - fn single_slot() { - let mut store = ObservedAttestations::default(); - - single_slot_test(&mut store, Slot::new(0)); - - assert_eq!(store.sets.len(), 1, "should have a single set stored"); - assert_eq!( - store.sets[0].len(), - NUM_ELEMENTS, - "set should have NUM_ELEMENTS elements" - ); - } - - #[test] - fn mulitple_contiguous_slots() { - let mut store = 
ObservedAttestations::default(); - let max_cap = store.max_capacity(); - - for i in 0..max_cap * 3 { - let slot = Slot::new(i); - - single_slot_test(&mut store, slot); - - /* - * Ensure that the number of sets is correct. - */ - - if i < max_cap { - assert_eq!( - store.sets.len(), - i as usize + 1, - "should have a {} sets stored", - i + 1 - ); - } else { - assert_eq!( - store.sets.len(), - max_cap as usize, - "should have max_capacity sets stored" - ); - } - - /* - * Ensure that each set contains the correct number of elements. - */ - - for set in &store.sets[..] { - assert_eq!( - set.len(), - NUM_ELEMENTS, - "each store should have NUM_ELEMENTS elements" - ) - } - - /* - * Ensure that all the sets have the expected slots - */ - - let mut store_slots = store.sets.iter().map(|set| set.slot).collect::>(); - - assert!( - store_slots.len() <= store.max_capacity() as usize, - "store size should not exceed max" - ); - - store_slots.sort_unstable(); - - let expected_slots = (i.saturating_sub(max_cap - 1)..=i) - .map(Slot::new) - .collect::>(); - - assert_eq!(expected_slots, store_slots, "should have expected slots"); - } - } - - #[test] - fn mulitple_non_contiguous_slots() { - let mut store = ObservedAttestations::default(); - let max_cap = store.max_capacity(); - - let to_skip = vec![1_u64, 2, 3, 5, 6, 29, 30, 31, 32, 64]; - let slots = (0..max_cap * 3) - .into_iter() - .filter(|i| !to_skip.contains(i)) - .collect::>(); - - for &i in &slots { - if to_skip.contains(&i) { - continue; - } - - let slot = Slot::from(i); - - single_slot_test(&mut store, slot); - - /* - * Ensure that each set contains the correct number of elements. - */ - - for set in &store.sets[..] { - assert_eq!( - set.len(), - NUM_ELEMENTS, - "each store should have NUM_ELEMENTS elements" - ) - } - - /* - * Ensure that all the sets have the expected slots - */ - - let mut store_slots = store.sets.iter().map(|set| set.slot).collect::>(); - - store_slots.sort_unstable(); - - assert!( - store_slots.len() <= store.max_capacity() as usize, - "store size should not exceed max" - ); - - let lowest = store.lowest_permissible_slot.as_u64(); - let highest = slot.as_u64(); - let expected_slots = (lowest..=highest) - .filter(|i| !to_skip.contains(i)) - .map(Slot::new) - .collect::>(); - - assert_eq!( - expected_slots, - &store_slots[..], - "should have expected slots" - ); - } - } -} diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index c657c04933f..043105992d1 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -5,14 +5,29 @@ //! the same epoch. //! - `ObservedAggregators`: allows filtering aggregated attestations from the same aggregators in //! the same epoch +//! +//! Provides an additional two structs that help us filter out sync committee message and +//! contribution gossip from validators that have already published messages this slot: +//! +//! - `ObservedSyncContributors`: allows filtering sync committee messages from the same validator in +//! the same slot. +//! - `ObservedSyncAggregators`: allows filtering sync committee contributions from the same aggregators in +//! the same slot and in the same subcommittee. 
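The key difference from the epoch-based filters is the key itself: sync-committee observations are deduplicated per `(slot, subcommittee_index)` rather than per slot, since a validator can act in several subcommittees during one slot. A std-only sketch of that keying; the tuple key and integer indices are stand-ins:

```rust
use std::collections::{HashMap, HashSet};

/// Stand-in for `SlotSubcommitteeIndex`: (slot, subcommittee_index).
type Key = (u64, u64);

/// Returns `true` if `validator` was already observed under `key`.
fn observe(seen: &mut HashMap<Key, HashSet<u64>>, key: Key, validator: u64) -> bool {
    !seen.entry(key).or_default().insert(validator)
}

fn main() {
    let mut seen = HashMap::new();
    assert!(!observe(&mut seen, (10, 0), 42)); // first message in subcommittee 0
    assert!(!observe(&mut seen, (10, 1), 42)); // same slot, other subcommittee: allowed
    assert!(observe(&mut seen, (10, 0), 42)); // duplicate within (slot, subcommittee)
}
```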
+use crate::types::consts::altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE; use bitvec::vec::BitVec; use std::collections::{HashMap, HashSet}; +use std::hash::Hash; use std::marker::PhantomData; -use types::{Attestation, Epoch, EthSpec, Unsigned}; +use types::slot_data::SlotData; +use types::{Epoch, EthSpec, Slot, Unsigned}; -pub type ObservedAttesters = AutoPruningContainer; -pub type ObservedAggregators = AutoPruningContainer; +pub type ObservedAttesters = AutoPruningEpochContainer; +pub type ObservedSyncContributors = + AutoPruningSlotContainer, E>; +pub type ObservedAggregators = AutoPruningEpochContainer; +pub type ObservedSyncAggregators = + AutoPruningSlotContainer; #[derive(Debug, PartialEq)] pub enum Error { @@ -20,7 +35,11 @@ pub enum Error { epoch: Epoch, lowest_permissible_epoch: Epoch, }, - /// We have reached the maximum number of unique `Attestation` that can be observed in a slot. + SlotTooLow { + slot: Slot, + lowest_permissible_slot: Slot, + }, + /// We have reached the maximum number of unique items that can be observed in a slot. /// This is a DoS protection function. ReachedMaxObservationsPerSlot(usize), /// The function to obtain a set index failed, this is an internal error. @@ -48,7 +67,8 @@ pub trait Item { fn contains(&self, validator_index: usize) -> bool; } -/// Stores a `BitVec` that represents which validator indices have attested during an epoch. +/// Stores a `BitVec` that represents which validator indices have attested or sent sync committee +/// signatures during an epoch. pub struct EpochBitfield { bitfield: BitVec, } @@ -99,7 +119,7 @@ impl Item for EpochBitfield { } } -/// Stores a `HashSet` of which validator indices have created an aggregate attestation during an +/// Stores a `HashSet` of which validator indices have created an aggregate during an /// epoch. pub struct EpochHashSet { set: HashSet, @@ -138,6 +158,84 @@ impl Item for EpochHashSet { } } +/// Stores a `HashSet` of which validator indices have created a sync aggregate during a +/// slot. +pub struct SyncContributorSlotHashSet { + set: HashSet, + phantom: PhantomData, +} + +impl Item for SyncContributorSlotHashSet { + fn with_capacity(capacity: usize) -> Self { + Self { + set: HashSet::with_capacity(capacity), + phantom: PhantomData, + } + } + + /// Defaults to the `SYNC_SUBCOMMITTEE_SIZE`. + fn default_capacity() -> usize { + E::sync_subcommittee_size() + } + + fn len(&self) -> usize { + self.set.len() + } + + fn validator_count(&self) -> usize { + self.set.len() + } + + /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was + /// already in the set. + fn insert(&mut self, validator_index: usize) -> bool { + !self.set.insert(validator_index) + } + + /// Returns `true` if the `validator_index` is in the set. + fn contains(&self, validator_index: usize) -> bool { + self.set.contains(&validator_index) + } +} + +/// Stores a `HashSet` of which validator indices have created a sync aggregate during a +/// slot. +pub struct SyncAggregatorSlotHashSet { + set: HashSet, +} + +impl Item for SyncAggregatorSlotHashSet { + fn with_capacity(capacity: usize) -> Self { + Self { + set: HashSet::with_capacity(capacity), + } + } + + /// Defaults to the `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE`. + fn default_capacity() -> usize { + TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE as usize + } + + fn len(&self) -> usize { + self.set.len() + } + + fn validator_count(&self) -> usize { + self.set.len() + } + + /// Inserts the `validator_index` in the set. 
Returns `true` if the `validator_index` was + /// already in the set. + fn insert(&mut self, validator_index: usize) -> bool { + !self.set.insert(validator_index) + } + + /// Returns `true` if the `validator_index` is in the set. + fn contains(&self, validator_index: usize) -> bool { + self.set.contains(&validator_index) + } +} + /// A container that stores some number of `T` items. /// /// This container is "auto-pruning" since it gets an idea of the current slot by which @@ -146,13 +244,13 @@ impl Item for EpochHashSet { /// attestations with an epoch prior to `a.data.target.epoch - 32` will be cleared from the cache. /// /// `T` should be set to a `EpochBitfield` or `EpochHashSet`. -pub struct AutoPruningContainer { +pub struct AutoPruningEpochContainer { lowest_permissible_epoch: Epoch, items: HashMap, _phantom: PhantomData, } -impl Default for AutoPruningContainer { +impl Default for AutoPruningEpochContainer { fn default() -> Self { Self { lowest_permissible_epoch: Epoch::new(0), @@ -162,22 +260,20 @@ impl Default for AutoPruningContainer { } } -impl AutoPruningContainer { +impl AutoPruningEpochContainer { /// Observe that `validator_index` has produced attestation `a`. Returns `Ok(true)` if `a` has /// previously been observed for `validator_index`. /// /// ## Errors /// /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`. - /// - `a.data.target.slot` is earlier than `self.earliest_permissible_slot`. + /// - `a.data.target.slot` is earlier than `self.lowest_permissible_slot`. pub fn observe_validator( &mut self, - a: &Attestation, + epoch: Epoch, validator_index: usize, ) -> Result { - self.sanitize_request(a, validator_index)?; - - let epoch = a.data.target.epoch; + self.sanitize_request(epoch, validator_index)?; self.prune(epoch); @@ -211,17 +307,17 @@ impl AutoPruningContainer { /// ## Errors /// /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`. - /// - `a.data.target.slot` is earlier than `self.earliest_permissible_slot`. + /// - `a.data.target.slot` is earlier than `self.lowest_permissible_slot`. pub fn validator_has_been_observed( &self, - a: &Attestation, + epoch: Epoch, validator_index: usize, ) -> Result { - self.sanitize_request(a, validator_index)?; + self.sanitize_request(epoch, validator_index)?; let exists = self .items - .get(&a.data.target.epoch) + .get(&epoch) .map_or(false, |item| item.contains(validator_index)); Ok(exists) @@ -233,12 +329,11 @@ impl AutoPruningContainer { self.items.get(&epoch).map(|item| item.validator_count()) } - fn sanitize_request(&self, a: &Attestation, validator_index: usize) -> Result<(), Error> { + fn sanitize_request(&self, epoch: Epoch, validator_index: usize) -> Result<(), Error> { if validator_index > E::ValidatorRegistryLimit::to_usize() { return Err(Error::ValidatorIndexTooHigh(validator_index)); } - let epoch = a.data.target.epoch; let lowest_permissible_epoch = self.lowest_permissible_epoch; if epoch < lowest_permissible_epoch { return Err(Error::EpochTooLow { @@ -272,84 +367,250 @@ impl AutoPruningContainer { /// Also sets `self.lowest_permissible_epoch` with relation to `current_epoch` and /// `Self::max_capacity`. pub fn prune(&mut self, current_epoch: Epoch) { - // Taking advantage of saturating subtraction on `Slot`. 
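Note the convention used by `insert` on both hash-set items above: it returns `true` when the index was already present, which is the inverse of `HashSet::insert`. A two-assert illustration of the idiom:

```rust
use std::collections::HashSet;

fn main() {
    let mut set: HashSet<usize> = HashSet::new();
    // `HashSet::insert` returns `true` when the value is newly inserted, so the
    // negation answers "was this index already known?".
    let already_known = !set.insert(7);
    assert!(!already_known);
    let already_known = !set.insert(7);
    assert!(already_known);
}
```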
- let lowest_permissible_epoch = current_epoch - (self.max_capacity().saturating_sub(1)); + let lowest_permissible_epoch = + current_epoch.saturating_sub(self.max_capacity().saturating_sub(1)); self.lowest_permissible_epoch = lowest_permissible_epoch; self.items .retain(|epoch, _item| *epoch >= lowest_permissible_epoch); } + + #[allow(dead_code)] + /// Returns the `lowest_permissible_epoch`. Used in tests. + pub(crate) fn get_lowest_permissible(&self) -> Epoch { + self.lowest_permissible_epoch + } + + /// Returns `true` if the given `index` has been stored in `self` at `epoch`. + /// + /// This is useful for doppelganger detection. + pub fn index_seen_at_epoch(&self, index: usize, epoch: Epoch) -> bool { + self.items + .get(&epoch) + .map(|item| item.contains(index)) + .unwrap_or(false) + } +} + +/// A container that stores some number of `V` items. +/// +/// This container is "auto-pruning" since it gets an idea of the current slot by which +/// sync contributions are provided to it and prunes old entries based upon that. For example, if +/// `Self::max_capacity == 3` and an attestation with `data.slot` is supplied, then all +/// sync contributions with an epoch prior to `data.slot - 3` will be cleared from the cache. +/// +/// `V` should be set to a `SyncAggregatorSlotHashSet` or a `SyncContributorSlotHashSet`. +pub struct AutoPruningSlotContainer { + lowest_permissible_slot: Slot, + items: HashMap, + _phantom: PhantomData, +} + +impl Default for AutoPruningSlotContainer { + fn default() -> Self { + Self { + lowest_permissible_slot: Slot::new(0), + items: HashMap::new(), + _phantom: PhantomData, + } + } +} + +impl AutoPruningSlotContainer { + /// Observe that `validator_index` has produced a sync committee message. Returns `Ok(true)` if + /// the sync committee message has previously been observed for `validator_index`. + /// + /// ## Errors + /// + /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`. + /// - `key.slot` is earlier than `self.lowest_permissible_slot`. + pub fn observe_validator(&mut self, key: K, validator_index: usize) -> Result { + let slot = key.get_slot(); + self.sanitize_request(slot, validator_index)?; + + self.prune(slot); + + if let Some(item) = self.items.get_mut(&key) { + Ok(item.insert(validator_index)) + } else { + // To avoid re-allocations, try and determine a rough initial capacity for the new item + // by obtaining the mean size of all items in earlier slot. + let (count, sum) = self + .items + .iter() + // Only include slots that are less than the given slot in the average. This should + // generally avoid including recent slots that are still "filling up". + .filter(|(item_key, _item)| item_key.get_slot() < slot) + .map(|(_, item)| item.len()) + .fold((0, 0), |(count, sum), len| (count + 1, sum + len)); + + let initial_capacity = sum.checked_div(count).unwrap_or_else(V::default_capacity); + + let mut item = V::with_capacity(initial_capacity); + item.insert(validator_index); + self.items.insert(key, item); + + Ok(false) + } + } + + /// Returns `Ok(true)` if the `validator_index` has already produced a conflicting sync committee message. + /// + /// ## Errors + /// + /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`. + /// - `key.slot` is earlier than `self.lowest_permissible_slot`. 
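The slot container above reuses the initial-capacity heuristic from the aggregates pool: average the sizes of sets from strictly earlier slots (recent ones may still be filling up) and fall back to a default when no history exists. The heuristic in isolation, with a hypothetical helper name:

```rust
/// Hypothetical helper: mean length of the earlier sets, or `default` if none exist.
fn initial_capacity(earlier_lens: &[usize], default: usize) -> usize {
    let (count, sum) = earlier_lens
        .iter()
        .fold((0usize, 0usize), |(count, sum), len| (count + 1, sum + len));
    // `checked_div` yields `None` for a zero count, making the fallback explicit.
    sum.checked_div(count).unwrap_or(default)
}

fn main() {
    assert_eq!(initial_capacity(&[], 128), 128); // no history: use the default
    assert_eq!(initial_capacity(&[6, 10, 8], 128), 8); // mean of earlier slots
}
```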
+ pub fn validator_has_been_observed( + &self, + key: K, + validator_index: usize, + ) -> Result { + self.sanitize_request(key.get_slot(), validator_index)?; + + let exists = self + .items + .get(&key) + .map_or(false, |item| item.contains(validator_index)); + + Ok(exists) + } + + /// Returns the number of validators that have been observed at the given `slot`. Returns + /// `None` if `self` does not have a cache for that slot. + pub fn observed_validator_count(&self, key: K) -> Option { + self.items.get(&key).map(|item| item.validator_count()) + } + + fn sanitize_request(&self, slot: Slot, validator_index: usize) -> Result<(), Error> { + if validator_index > E::ValidatorRegistryLimit::to_usize() { + return Err(Error::ValidatorIndexTooHigh(validator_index)); + } + + let lowest_permissible_slot = self.lowest_permissible_slot; + if slot < lowest_permissible_slot { + return Err(Error::SlotTooLow { + slot, + lowest_permissible_slot, + }); + } + + Ok(()) + } + + /// The maximum number of slots stored in `self`. + fn max_capacity(&self) -> u64 { + // The next, current and previous slots. We require the next slot due to the + // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. + 3 + } + + /// Updates `self` with the current slot, removing all sync committee messages that become expired + /// relative to `Self::max_capacity`. + /// + /// Also sets `self.lowest_permissible_slot` with relation to `current_slot` and + /// `Self::max_capacity`. + pub fn prune(&mut self, current_slot: Slot) { + let lowest_permissible_slot = + current_slot.saturating_sub(self.max_capacity().saturating_sub(1)); + + self.lowest_permissible_slot = lowest_permissible_slot; + + self.items + .retain(|key, _item| key.get_slot() >= lowest_permissible_slot); + } + + #[allow(dead_code)] + /// Returns the `lowest_permissible_slot`. Used in tests. + pub(crate) fn get_lowest_permissible(&self) -> Slot { + self.lowest_permissible_slot + } +} + +/// This is used to key information about sync committee aggregators. We require the +/// `subcommittee_index` because it is possible that a validator can aggregate for multiple +/// subcommittees in the same slot. +#[derive(Eq, PartialEq, Hash, Clone, Copy, PartialOrd, Ord, Debug)] +pub struct SlotSubcommitteeIndex { + slot: Slot, + subcommittee_index: u64, +} + +impl SlotData for SlotSubcommitteeIndex { + fn get_slot(&self) -> Slot { + self.slot + } +} + +impl SlotSubcommitteeIndex { + pub fn new(slot: Slot, subcommittee_index: u64) -> Self { + Self { + slot, + subcommittee_index, + } + } } #[cfg(test)] mod tests { use super::*; - macro_rules! test_suite { + type E = types::MainnetEthSpec; + + macro_rules! 
test_suite_epoch { ($mod_name: ident, $type: ident) => { #[cfg(test)] mod $mod_name { use super::*; - use types::test_utils::test_random_instance; - type E = types::MainnetEthSpec; + fn single_period_test(store: &mut $type, period: Epoch) { + let validator_indices = [0, 1, 2, 3, 5, 6, 7, 18, 22]; - fn get_attestation(epoch: Epoch) -> Attestation { - let mut a: Attestation = test_random_instance(); - a.data.target.epoch = epoch; - a - } - - fn single_epoch_test(store: &mut $type, epoch: Epoch) { - let attesters = [0, 1, 2, 3, 5, 6, 7, 18, 22]; - let a = &get_attestation(epoch); - - for &i in &attesters { + for &i in &validator_indices { assert_eq!( - store.validator_has_been_observed(a, i), + store.validator_has_been_observed(period, i), Ok(false), - "should indicate an unknown attestation is unknown" + "should indicate an unknown item is unknown" ); assert_eq!( - store.observe_validator(a, i), + store.observe_validator(period, i), Ok(false), - "should observe new attestation" + "should observe new item" ); } - for &i in &attesters { + for &i in &validator_indices { assert_eq!( - store.validator_has_been_observed(a, i), + store.validator_has_been_observed(period, i), Ok(true), - "should indicate a known attestation is known" + "should indicate a known item is known" ); assert_eq!( - store.observe_validator(a, i), + store.observe_validator(period, i), Ok(true), - "should acknowledge an existing attestation" + "should acknowledge an existing item" ); } } #[test] - fn single_epoch() { + fn single_period() { let mut store = $type::default(); - single_epoch_test(&mut store, Epoch::new(0)); + single_period_test(&mut store, Epoch::new(0)); assert_eq!(store.items.len(), 1, "should have a single bitfield stored"); } #[test] - fn mulitple_contiguous_epochs() { + fn mulitple_contiguous_periods() { let mut store = $type::default(); let max_cap = store.max_capacity(); for i in 0..max_cap * 3 { - let epoch = Epoch::new(i); + let period = Epoch::new(i); - single_epoch_test(&mut store, epoch); + single_period_test(&mut store, period); /* * Ensure that the number of sets is correct. 
@@ -374,74 +635,77 @@ mod tests { * Ensure that all the sets have the expected slots */ - let mut store_epochs = store + let mut store_periods = store .items .iter() - .map(|(epoch, _set)| *epoch) + .map(|(period, _set)| *period) .collect::>(); assert!( - store_epochs.len() <= store.max_capacity() as usize, + store_periods.len() <= store.max_capacity() as usize, "store size should not exceed max" ); - store_epochs.sort_unstable(); + store_periods.sort_unstable(); - let expected_epochs = (i.saturating_sub(max_cap - 1)..=i) + let expected_periods = (i.saturating_sub(max_cap - 1)..=i) .map(Epoch::new) .collect::>(); - assert_eq!(expected_epochs, store_epochs, "should have expected slots"); + assert_eq!( + expected_periods, store_periods, + "should have expected slots" + ); } } #[test] - fn mulitple_non_contiguous_epochs() { + fn mulitple_non_contiguous_periods() { let mut store = $type::default(); let max_cap = store.max_capacity(); let to_skip = vec![1_u64, 3, 4, 5]; - let epochs = (0..max_cap * 3) + let periods = (0..max_cap * 3) .into_iter() .filter(|i| !to_skip.contains(i)) .collect::>(); - for &i in &epochs { + for &i in &periods { if to_skip.contains(&i) { continue; } - let epoch = Epoch::from(i); + let period = Epoch::from(i); - single_epoch_test(&mut store, epoch); + single_period_test(&mut store, period); /* * Ensure that all the sets have the expected slots */ - let mut store_epochs = store + let mut store_periods = store .items .iter() - .map(|(epoch, _)| *epoch) + .map(|(period, _)| *period) .collect::>(); - store_epochs.sort_unstable(); + store_periods.sort_unstable(); assert!( - store_epochs.len() <= store.max_capacity() as usize, + store_periods.len() <= store.max_capacity() as usize, "store size should not exceed max" ); - let lowest = store.lowest_permissible_epoch.as_u64(); - let highest = epoch.as_u64(); - let expected_epochs = (lowest..=highest) + let lowest = store.get_lowest_permissible().as_u64(); + let highest = period.as_u64(); + let expected_periods = (lowest..=highest) .filter(|i| !to_skip.contains(i)) .map(Epoch::new) .collect::>(); assert_eq!( - expected_epochs, - &store_epochs[..], + expected_periods, + &store_periods[..], "should have expected epochs" ); } @@ -450,6 +714,285 @@ mod tests { }; } - test_suite!(observed_attesters, ObservedAttesters); - test_suite!(observed_aggregators, ObservedAggregators); + test_suite_epoch!(observed_attesters, ObservedAttesters); + test_suite_epoch!(observed_aggregators, ObservedAggregators); + + macro_rules! 
test_suite_slot { + ($mod_name: ident, $type: ident) => { + #[cfg(test)] + mod $mod_name { + use super::*; + + fn single_period_test(store: &mut $type, key: SlotSubcommitteeIndex) { + let validator_indices = [0, 1, 2, 3, 5, 6, 7, 18, 22]; + + for &i in &validator_indices { + assert_eq!( + store.validator_has_been_observed(key, i), + Ok(false), + "should indicate an unknown item is unknown" + ); + assert_eq!( + store.observe_validator(key, i), + Ok(false), + "should observe new item" + ); + } + + for &i in &validator_indices { + assert_eq!( + store.validator_has_been_observed(key, i), + Ok(true), + "should indicate a known item is known" + ); + assert_eq!( + store.observe_validator(key, i), + Ok(true), + "should acknowledge an existing item" + ); + } + } + + #[test] + fn single_period() { + let mut store = $type::default(); + + single_period_test(&mut store, SlotSubcommitteeIndex::new(Slot::new(0), 0)); + + assert_eq!(store.items.len(), 1, "should have a single bitfield stored"); + } + + #[test] + fn single_period_multiple_subcommittees() { + let mut store = $type::default(); + + single_period_test(&mut store, SlotSubcommitteeIndex::new(Slot::new(0), 0)); + single_period_test(&mut store, SlotSubcommitteeIndex::new(Slot::new(0), 1)); + single_period_test(&mut store, SlotSubcommitteeIndex::new(Slot::new(0), 2)); + + assert_eq!(store.items.len(), 3, "should have three hash sets stored"); + } + + #[test] + fn mulitple_contiguous_periods_same_subcommittee() { + let mut store = $type::default(); + let max_cap = store.max_capacity(); + + for i in 0..max_cap * 3 { + let period = SlotSubcommitteeIndex::new(Slot::new(i), 0); + + single_period_test(&mut store, period); + + /* + * Ensure that the number of sets is correct. + */ + + if i < max_cap { + assert_eq!( + store.items.len(), + i as usize + 1, + "should have a {} items stored", + i + 1 + ); + } else { + assert_eq!( + store.items.len(), + max_cap as usize, + "should have max_capacity items stored" + ); + } + + /* + * Ensure that all the sets have the expected slots + */ + + let mut store_periods = store + .items + .iter() + .map(|(period, _set)| *period) + .collect::>(); + + assert!( + store_periods.len() <= store.max_capacity() as usize, + "store size should not exceed max" + ); + + store_periods.sort_unstable(); + + let expected_periods = (i.saturating_sub(max_cap - 1)..=i) + .map(|i| SlotSubcommitteeIndex::new(Slot::new(i), 0)) + .collect::>(); + + assert_eq!( + expected_periods, store_periods, + "should have expected slots" + ); + } + } + + #[test] + fn mulitple_non_contiguous_periods_same_subcommitte() { + let mut store = $type::default(); + let max_cap = store.max_capacity(); + + let to_skip = vec![1_u64, 3, 4, 5]; + let periods = (0..max_cap * 3) + .into_iter() + .filter(|i| !to_skip.contains(i)) + .collect::>(); + + for &i in &periods { + if to_skip.contains(&i) { + continue; + } + + let period = SlotSubcommitteeIndex::new(Slot::from(i), 0); + + single_period_test(&mut store, period); + + /* + * Ensure that all the sets have the expected slots + */ + + let mut store_periods = store + .items + .iter() + .map(|(period, _)| *period) + .collect::>(); + + store_periods.sort_unstable(); + + assert!( + store_periods.len() <= store.max_capacity() as usize, + "store size should not exceed max" + ); + + let lowest = store.get_lowest_permissible().as_u64(); + let highest = period.slot.as_u64(); + let expected_periods = (lowest..=highest) + .filter(|i| !to_skip.contains(i)) + .map(|i| SlotSubcommitteeIndex::new(Slot::new(i), 0)) + .collect::>(); + + 
assert_eq!( + expected_periods, + &store_periods[..], + "should have expected epochs" + ); + } + } + + #[test] + fn mulitple_contiguous_periods_different_subcommittee() { + let mut store = $type::default(); + let max_cap = store.max_capacity(); + + for i in 0..max_cap * 3 { + let period = SlotSubcommitteeIndex::new(Slot::new(i), i); + + single_period_test(&mut store, period); + + /* + * Ensure that the number of sets is correct. + */ + + if i < max_cap { + assert_eq!( + store.items.len(), + i as usize + 1, + "should have a {} items stored", + i + 1 + ); + } else { + assert_eq!( + store.items.len(), + max_cap as usize, + "should have max_capacity items stored" + ); + } + + /* + * Ensure that all the sets have the expected slots + */ + + let mut store_periods = store + .items + .iter() + .map(|(period, _set)| *period) + .collect::>(); + + assert!( + store_periods.len() <= store.max_capacity() as usize, + "store size should not exceed max" + ); + + store_periods.sort_unstable(); + + let expected_periods = (i.saturating_sub(max_cap - 1)..=i) + .map(|i| SlotSubcommitteeIndex::new(Slot::new(i), i)) + .collect::>(); + + assert_eq!( + expected_periods, store_periods, + "should have expected slots" + ); + } + } + + #[test] + fn mulitple_non_contiguous_periods_different_subcommitte() { + let mut store = $type::default(); + let max_cap = store.max_capacity(); + + let to_skip = vec![1_u64, 3, 4, 5]; + let periods = (0..max_cap * 3) + .into_iter() + .filter(|i| !to_skip.contains(i)) + .collect::>(); + + for &i in &periods { + if to_skip.contains(&i) { + continue; + } + + let period = SlotSubcommitteeIndex::new(Slot::from(i), i); + + single_period_test(&mut store, period); + + /* + * Ensure that all the sets have the expected slots + */ + + let mut store_periods = store + .items + .iter() + .map(|(period, _)| *period) + .collect::>(); + + store_periods.sort_unstable(); + + assert!( + store_periods.len() <= store.max_capacity() as usize, + "store size should not exceed max" + ); + + let lowest = store.get_lowest_permissible().as_u64(); + let highest = period.slot.as_u64(); + let expected_periods = (lowest..=highest) + .filter(|i| !to_skip.contains(i)) + .map(|i| SlotSubcommitteeIndex::new(Slot::new(i), i)) + .collect::>(); + + assert_eq!( + expected_periods, + &store_periods[..], + "should have expected epochs" + ); + } + } + } + }; + } + test_suite_slot!(observed_sync_contributors, ObservedSyncContributors); + test_suite_slot!(observed_sync_aggregators, ObservedSyncAggregators); } diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index 66e036f36fc..b5995121b99 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BeaconBlockRef, EthSpec, Slot, Unsigned}; +use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned}; #[derive(Debug, PartialEq)] pub enum Error { @@ -114,6 +114,15 @@ impl ObservedBlockProducers { self.finalized_slot = finalized_slot; self.items.retain(|slot, _set| *slot > finalized_slot); } + + /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`. + /// + /// This is useful for doppelganger detection. 
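`index_seen_at_epoch` below answers the doppelganger query by scanning the slot-keyed map for any slot inside the target epoch that contains the validator. The same scan in a std-only sketch, assuming mainnet's 32 slots per epoch:

```rust
use std::collections::{HashMap, HashSet};

const SLOTS_PER_EPOCH: u64 = 32;

/// Stand-in for the slot-keyed cache: slot -> proposer indices seen at that slot.
fn index_seen_at_epoch(items: &HashMap<u64, HashSet<u64>>, index: u64, epoch: u64) -> bool {
    items
        .iter()
        .any(|(slot, producers)| slot / SLOTS_PER_EPOCH == epoch && producers.contains(&index))
}

fn main() {
    let mut items: HashMap<u64, HashSet<u64>> = HashMap::new();
    items.entry(65).or_default().insert(9); // slot 65 lies in epoch 2

    assert!(index_seen_at_epoch(&items, 9, 2));
    assert!(!index_seen_at_epoch(&items, 9, 1));
}
```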
+ pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool { + self.items.iter().any(|(slot, producers)| { + slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index) + }) + } } #[cfg(test)] diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ca3e6efbbfb..a96e1e7c3ce 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,6 +1,7 @@ //! Utilities for managing database schema changes. -use crate::beacon_chain::BeaconChainTypes; +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; +use operation_pool::{PersistedOperationPool, PersistedOperationPoolBase}; use std::fs; use std::path::Path; use std::sync::Arc; @@ -54,6 +55,24 @@ pub fn migrate_schema( Ok(()) } + // Migration for adding sync committee contributions to the persisted op pool. + (SchemaVersion(3), SchemaVersion(4)) => { + // Deserialize from what exists in the database using the `PersistedOperationPoolBase` + // variant and convert it to the Altair variant. + let pool_opt = db + .get_item::>(&OP_POOL_DB_KEY)? + .map(PersistedOperationPool::Base) + .map(PersistedOperationPool::base_to_altair); + + if let Some(pool) = pool_opt { + // Store the converted pool under the same key. + db.put_item::>(&OP_POOL_DB_KEY, &pool)?; + } + + db.store_schema_version(to)?; + + Ok(()) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index eea329a2a3d..947e8c38e05 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -233,15 +233,32 @@ fn advance_head( if let Some(summary) = per_slot_processing(&mut state, state_root, &beacon_chain.spec) .map_err(BeaconChainError::from)? { + // Expose Prometheus metrics. + if let Err(e) = summary.observe_metrics() { + error!( + log, + "Failed to observe epoch summary metrics"; + "src" => "state_advance_timer", + "error" => ?e + ); + } + // Only notify the validator monitor for recent blocks. if state.current_epoch() + VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 >= current_slot.epoch(T::EthSpec::slots_per_epoch()) { // Potentially create logs/metrics for locally monitored validators. - beacon_chain + if let Err(e) = beacon_chain .validator_monitor .read() - .process_validator_statuses(state.current_epoch(), &summary.statuses); + .process_validator_statuses(state.current_epoch(), &summary, &beacon_chain.spec) + { + error!( + log, + "Unable to process validator statuses"; + "error" => ?e + ); + } } } @@ -304,6 +321,12 @@ fn advance_head( ); } + // Apply the state to the attester cache, if the cache deems it interesting. + beacon_chain + .attester_cache + .maybe_cache_state(&state, head_root, &beacon_chain.spec) + .map_err(BeaconChainError::from)?; + let final_slot = state.slot(); // Insert the advanced state back into the snapshot cache. diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs new file mode 100644 index 00000000000..403ef683a76 --- /dev/null +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -0,0 +1,658 @@ +//! Provides verification for the following sync committee messages: +//! +//! 
- "Unaggregated" `SyncCommitteeMessage` received from either gossip or the HTTP API. +//! - "Aggregated" `SignedContributionAndProof` received from gossip or the HTTP API. +//! +//! For clarity, we define: +//! +//! - Unaggregated: a `SyncCommitteeMessage` object. +//! - Aggregated: a `SignedContributionAndProof` which has zero or more signatures. +//! - Note: "zero or more" may soon change to "one or more". +//! +//! Similar to the `crate::block_verification` module, we try to avoid doing duplicate verification +//! work as a sync committee message passes through different stages of verification. We represent these +//! different stages of verification with wrapper types. These wrapper-types flow in a particular +//! pattern: +//! +//! ```ignore +//! types::SyncCommitteeMessage types::SignedContributionAndProof +//! | | +//! â–¼ â–¼ +//! VerifiedSyncCommitteeMessage VerifiedSyncContribution +//! | | +//! ------------------------------------- +//! | +//! â–¼ +//! impl SignatureVerifiedSyncContribution +//! ``` + +use crate::observed_attesters::SlotSubcommitteeIndex; +use crate::{ + beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, + metrics, + observed_aggregates::ObserveOutcome, + BeaconChain, BeaconChainError, BeaconChainTypes, +}; +use bls::{verify_signature_sets, PublicKeyBytes}; +use derivative::Derivative; +use safe_arith::ArithError; +use slot_clock::SlotClock; +use state_processing::per_block_processing::errors::SyncCommitteeMessageValidationError; +use state_processing::signature_sets::{ + signed_sync_aggregate_selection_proof_signature_set, signed_sync_aggregate_signature_set, + sync_committee_contribution_signature_set_from_pubkeys, + sync_committee_message_set_from_pubkeys, +}; +use std::borrow::Cow; +use std::collections::HashMap; +use strum::AsRefStr; +use tree_hash::TreeHash; +use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use types::slot_data::SlotData; +use types::sync_committee::Error as SyncCommitteeError; +use types::{ + sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError, + EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, + SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, +}; + +/// Returned when a sync committee contribution was not successfully verified. It might not have been verified for +/// two reasons: +/// +/// - The sync committee message is malformed or inappropriate for the context (indicated by all variants +/// other than `BeaconChainError`). +/// - The application encountered an internal error whilst attempting to determine validity +/// (the `BeaconChainError` variant) +#[derive(Debug, AsRefStr)] +pub enum Error { + /// The sync committee message is from a slot that is later than the current slot (with respect to the + /// gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + /// The sync committee message is from a slot that is prior to the earliest permissible slot (with + /// respect to the gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + PastSlot { + message_slot: Slot, + earliest_permissible_slot: Slot, + }, + /// The sync committee message's aggregation bits were empty when they shouldn't be. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. 
+ EmptyAggregationBitfield, + /// The `selection_proof` on the sync contribution does not elect it as an aggregator. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + InvalidSelectionProof { aggregator_index: u64 }, + /// The `selection_proof` on the sync committee contribution selects it as a validator, however the + /// aggregator index is not in the committee for that sync contribution. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + AggregatorNotInCommittee { aggregator_index: u64 }, + /// The aggregator index refers to a validator index that we have not seen. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + AggregatorPubkeyUnknown(u64), + /// The sync contribution has been seen before; either in a block, on the gossip network or from a + /// local validator. + /// + /// ## Peer scoring + /// + /// It's unclear if this sync contribution is valid, however we have already observed it and do not + /// need to observe it again. + SyncContributionAlreadyKnown(Hash256), + /// There has already been an aggregation observed for this validator, we refuse to process a + /// second. + /// + /// ## Peer scoring + /// + /// It's unclear if this sync committee message is valid, however we have already observed an aggregate + /// sync committee message from this validator for this epoch and should not observe another. + AggregatorAlreadyKnown(u64), + /// The aggregator index is higher than the maximum possible validator count. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + UnknownValidatorIndex(usize), + /// The public key of the validator has not been seen locally. + /// + /// ## Peer scoring + /// + /// It's unclear if this sync committee message is valid, however we have already observed an aggregate + /// sync committee message from this validator for this epoch and should not observe another. + UnknownValidatorPubkey(PublicKeyBytes), + /// A signature on the sync committee message is invalid. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + InvalidSignature, + /// We have already observed a signature for the `validator_index` and refuse to process + /// another. + /// + /// ## Peer scoring + /// + /// It's unclear if this sync message is valid, however we have already observed a + /// signature from this validator for this slot and should not observe + /// another. + PriorSyncCommitteeMessageKnown { validator_index: u64, slot: Slot }, + /// The sync committee message was received on an invalid sync committee message subnet. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + InvalidSubnetId { + received: SyncSubnetId, + expected: Vec, + }, + /// The sync message failed the `state_processing` verification stage. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + Invalid(SyncCommitteeMessageValidationError), + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this sync committee message due to an internal error. It's unclear if the + /// sync committee message is valid. + BeaconChainError(BeaconChainError), + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this sync committee message due to an internal error. 
It's unclear if the + /// sync committee message is valid. + BeaconStateError(BeaconStateError), + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this sync committee message due to an internal error. It's unclear if the + /// sync committee message is valid. + InvalidSubcommittee { + subcommittee_index: u64, + subcommittee_size: u64, + }, + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this sync committee message due to an internal error. It's unclear if the + /// sync committee message is valid. + ArithError(ArithError), + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this sync committee message due to an internal error. It's unclear if the + /// sync committee message is valid. + ContributionError(ContributionError), + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this sync committee message due to an internal error. It's unclear if the + /// sync committee message is valid. + SyncCommitteeError(SyncCommitteeError), +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChainError(e) + } +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconStateError(e) + } +} + +impl From for Error { + fn from(e: SyncCommitteeError) -> Self { + Error::SyncCommitteeError(e) + } +} + +impl From for Error { + fn from(e: ArithError) -> Self { + Error::ArithError(e) + } +} + +impl From for Error { + fn from(e: ContributionError) -> Self { + Error::ContributionError(e) + } +} + +/// Wraps a `SignedContributionAndProof` that has been verified for propagation on the gossip network.\ +#[derive(Derivative)] +#[derivative(Clone(bound = "T: BeaconChainTypes"))] +pub struct VerifiedSyncContribution { + signed_aggregate: SignedContributionAndProof, +} + +/// Wraps a `SyncCommitteeMessage` that has been verified for propagation on the gossip network. +#[derive(Clone)] +pub struct VerifiedSyncCommitteeMessage { + sync_message: SyncCommitteeMessage, + subnet_positions: HashMap>, +} + +impl VerifiedSyncContribution { + /// Returns `Ok(Self)` if the `signed_aggregate` is valid to be (re)published on the gossip + /// network. + pub fn verify( + signed_aggregate: SignedContributionAndProof, + chain: &BeaconChain, + ) -> Result { + let aggregator_index = signed_aggregate.message.aggregator_index; + let contribution = &signed_aggregate.message.contribution; + let subcommittee_index = contribution.subcommittee_index as usize; + + // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. + verify_propagation_slot_range(chain, contribution)?; + + // Validate subcommittee index. + if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { + return Err(Error::InvalidSubcommittee { + subcommittee_index: contribution.subcommittee_index, + subcommittee_size: SYNC_COMMITTEE_SUBNET_COUNT, + }); + } + + // Ensure that the sync committee message has participants. 
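Participant selection further below zips the subcommittee's pubkeys against the contribution's aggregation bits and keeps the set bits, so an all-zero bitfield yields no participants and is rejected up front as `EmptyAggregationBitfield`. A sketch with integers and plain `bool`s standing in for the BLS and bitfield types:

```rust
/// Integer "pubkeys" and plain `bool`s stand in for the real types.
fn participants(pubkeys: &[u64], bits: &[bool]) -> Vec<u64> {
    pubkeys
        .iter()
        .zip(bits.iter())
        .filter_map(|(pubkey, bit)| bit.then(|| *pubkey))
        .collect()
}

fn main() {
    let pubkeys = [11, 22, 33, 44];
    assert_eq!(participants(&pubkeys, &[true, false, false, true]), vec![11, 44]);
    // An all-zero bitfield selects nobody; the verifier rejects this case early.
    assert!(participants(&pubkeys, &[false; 4]).is_empty());
}
```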
+ if contribution.aggregation_bits.is_zero() { + return Err(Error::EmptyAggregationBitfield); + } + + // Ensure the aggregator's pubkey is in the declared subcommittee of the current sync committee + let pubkey_bytes = chain + .validator_pubkey_bytes(aggregator_index as usize)? + .ok_or(Error::UnknownValidatorIndex(aggregator_index as usize))?; + let sync_subcommittee_pubkeys = chain + .sync_committee_at_next_slot(contribution.get_slot())? + .get_subcommittee_pubkeys(subcommittee_index)?; + + if !sync_subcommittee_pubkeys.contains(&pubkey_bytes) { + return Err(Error::AggregatorNotInCommittee { aggregator_index }); + }; + + // Ensure the valid sync contribution has not already been seen locally. + let contribution_root = contribution.tree_hash_root(); + if chain + .observed_sync_contributions + .write() + .is_known(contribution, contribution_root) + .map_err(|e| Error::BeaconChainError(e.into()))? + { + return Err(Error::SyncContributionAlreadyKnown(contribution_root)); + } + + // Ensure there has been no other observed aggregate for the given `aggregator_index`. + // + // Note: do not observe yet, only observe once the sync contribution has been verified. + let observed_key = + SlotSubcommitteeIndex::new(contribution.slot, contribution.subcommittee_index); + match chain + .observed_sync_aggregators + .read() + .validator_has_been_observed(observed_key, aggregator_index as usize) + { + Ok(true) => Err(Error::AggregatorAlreadyKnown(aggregator_index)), + Ok(false) => Ok(()), + Err(e) => Err(BeaconChainError::from(e).into()), + }?; + + // Note: this clones the signature which is known to be a relatively slow operation. + // + // Future optimizations should remove this clone. + let selection_proof = + SyncSelectionProof::from(signed_aggregate.message.selection_proof.clone()); + + if !selection_proof + .is_aggregator::() + .map_err(|e| Error::BeaconChainError(e.into()))? + { + return Err(Error::InvalidSelectionProof { aggregator_index }); + } + + // Gather all validator pubkeys that signed this contribution. + let participant_pubkeys = sync_subcommittee_pubkeys + .into_iter() + .zip(contribution.aggregation_bits.iter()) + .filter_map(|(pubkey, bit)| bit.then(|| pubkey)) + .collect::>(); + + // Ensure that all signatures are valid. + if !verify_signed_aggregate_signatures( + chain, + &signed_aggregate, + participant_pubkeys.as_slice(), + )? { + return Err(Error::InvalidSignature); + } + + let contribution = &signed_aggregate.message.contribution; + let aggregator_index = signed_aggregate.message.aggregator_index; + + // Observe the valid sync contribution so we do not re-process it. + // + // It's important to double check that the contribution is not already known, otherwise two + // contribution processed at the same time could be published. + if let ObserveOutcome::AlreadyKnown = chain + .observed_sync_contributions + .write() + .observe_item(contribution, Some(contribution_root)) + .map_err(|e| Error::BeaconChainError(e.into()))? + { + return Err(Error::SyncContributionAlreadyKnown(contribution_root)); + } + + // Observe the aggregator so we don't process another aggregate from them. + // + // It's important to double check that the sync committee message is not already known, otherwise two + // sync committee messages processed at the same time could be published. + if chain + .observed_sync_aggregators + .write() + .observe_validator(observed_key, aggregator_index as usize) + .map_err(BeaconChainError::from)? 
+ { + return Err(Error::PriorSyncCommitteeMessageKnown { + validator_index: aggregator_index, + slot: contribution.slot, + }); + } + Ok(VerifiedSyncContribution { signed_aggregate }) + } + + /// A helper function to add this aggregate to `beacon_chain.op_pool`. + pub fn add_to_pool(self, chain: &BeaconChain) -> Result<(), Error> { + chain.add_contribution_to_block_inclusion_pool(self) + } + + /// Returns the underlying `contribution` for the `signed_aggregate`. + pub fn contribution(self) -> SyncCommitteeContribution { + self.signed_aggregate.message.contribution + } + + /// Returns the underlying `signed_aggregate`. + pub fn aggregate(&self) -> &SignedContributionAndProof { + &self.signed_aggregate + } +} + +impl VerifiedSyncCommitteeMessage { + /// Returns `Ok(Self)` if the `sync_message` is valid to be (re)published on the gossip + /// network. + /// + /// `subnet_id` is the subnet from which we received this sync message. This function will + /// verify that it was received on the correct subnet. + pub fn verify( + sync_message: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + chain: &BeaconChain, + ) -> Result { + // Ensure sync committee message is for the current slot (within a + // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). + // + // We do not queue future sync committee messages for later processing. + verify_propagation_slot_range(chain, &sync_message)?; + + // Ensure the `subnet_id` is valid for the given validator. + let pubkey = chain + .validator_pubkey_bytes(sync_message.validator_index as usize)? + .ok_or(Error::UnknownValidatorIndex( + sync_message.validator_index as usize, + ))?; + + let sync_committee = chain.sync_committee_at_next_slot(sync_message.get_slot())?; + let subnet_positions = sync_committee.subcommittee_positions_for_public_key(&pubkey)?; + + if !subnet_positions.contains_key(&subnet_id) { + return Err(Error::InvalidSubnetId { + received: subnet_id, + expected: subnet_positions.keys().cloned().collect::>(), + }); + } + + // The sync committee message is the first valid message received for the participating validator + // for the slot, sync_message.slot. + let validator_index = sync_message.validator_index; + if chain + .observed_sync_contributors + .read() + .validator_has_been_observed( + SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()), + validator_index as usize, + ) + .map_err(BeaconChainError::from)? + { + return Err(Error::PriorSyncCommitteeMessageKnown { + validator_index, + slot: sync_message.slot, + }); + } + + // The aggregate signature of the sync committee message is valid. + verify_sync_committee_message(chain, &sync_message, &pubkey)?; + + // Now that the sync committee message has been fully verified, store that we have received a valid + // sync committee message from this validator. + // + // It's important to double check that the sync committee message still hasn't been observed, since + // there can be a race-condition if we receive two sync committee messages at the same time and + // process them in different threads. + if chain + .observed_sync_contributors + .write() + .observe_validator( + SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()), + validator_index as usize, + ) + .map_err(BeaconChainError::from)? + { + return Err(Error::PriorSyncCommitteeMessageKnown { + validator_index, + slot: sync_message.slot, + }); + } + + Ok(Self { + sync_message, + subnet_positions, + }) + } + + /// A helper function to add this sync committee message to `beacon_chain.naive_sync_aggregation_pool`. 
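Both verifiers follow the same check, verify, then re-check-on-write discipline: a cheap read-locked lookup rejects known messages before signature verification, and the final write-locked observation is checked again because two copies of one message can race through verification on different threads. A minimal sketch with an `RwLock`-guarded set; the error strings are illustrative:

```rust
use std::collections::HashSet;
use std::sync::RwLock;

fn process(seen: &RwLock<HashSet<u64>>, validator_index: u64) -> Result<(), &'static str> {
    // Cheap pre-check under a read lock.
    if seen.read().unwrap().contains(&validator_index) {
        return Err("prior message known (pre-verification check)");
    }

    // ... expensive signature verification would happen here ...

    // Re-check under the write lock: another thread may have won the race.
    if !seen.write().unwrap().insert(validator_index) {
        return Err("prior message known (post-verification re-check)");
    }
    Ok(())
}

fn main() {
    let seen = RwLock::new(HashSet::new());
    assert!(process(&seen, 7).is_ok());
    assert!(process(&seen, 7).is_err());
}
```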
+
+    /// A helper function to add this sync committee message to `beacon_chain.naive_sync_aggregation_pool`.
+    pub fn add_to_pool<T: BeaconChainTypes>(self, chain: &BeaconChain<T>) -> Result<Self, Error> {
+        chain.add_to_naive_sync_aggregation_pool(self)
+    }
+
+    /// Returns the subcommittee positions for the sync message, keyed on the `SyncSubnetId` for
+    /// the subnets the signature should be sent on.
+    pub fn subnet_positions(&self) -> &HashMap<SyncSubnetId, Vec<usize>> {
+        &self.subnet_positions
+    }
+
+    /// Returns the wrapped `SyncCommitteeMessage`.
+    pub fn sync_message(&self) -> &SyncCommitteeMessage {
+        &self.sync_message
+    }
+}
+
+/// Verify that the `sync_contribution` is within the acceptable gossip propagation range, with reference
+/// to the current slot of the `chain`.
+///
+/// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
+pub fn verify_propagation_slot_range<T: BeaconChainTypes, U: SlotData>(
+    chain: &BeaconChain<T>,
+    sync_contribution: &U,
+) -> Result<(), Error> {
+    let message_slot = sync_contribution.get_slot();
+
+    let latest_permissible_slot = chain
+        .slot_clock
+        .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .ok_or(BeaconChainError::UnableToReadSlot)?;
+    if message_slot > latest_permissible_slot {
+        return Err(Error::FutureSlot {
+            message_slot,
+            latest_permissible_slot,
+        });
+    }
+
+    let earliest_permissible_slot = chain
+        .slot_clock
+        .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .ok_or(BeaconChainError::UnableToReadSlot)?;
+
+    if message_slot < earliest_permissible_slot {
+        return Err(Error::PastSlot {
+            message_slot,
+            earliest_permissible_slot,
+        });
+    }
+
+    Ok(())
+}
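The two tolerance checks define a closed window of acceptable slots around the node's own clock. With mainnet timing (12-second slots) and the spec's 500 ms `MAXIMUM_GOSSIP_CLOCK_DISPARITY`, the window can be computed directly (a simplified sketch; the real `SlotClock` also handles the genesis offset):

```rust
/// (earliest, latest) permissible slots given milliseconds since genesis.
/// Mainnet timing assumed.
fn permissible_slot_window(since_genesis_ms: u64) -> (u64, u64) {
    const MS_PER_SLOT: u64 = 12_000; // SECONDS_PER_SLOT * 1000
    const MAX_CLOCK_DISPARITY_MS: u64 = 500; // MAXIMUM_GOSSIP_CLOCK_DISPARITY

    // Tolerate peers whose clocks run slightly ahead of ours...
    let latest = (since_genesis_ms + MAX_CLOCK_DISPARITY_MS) / MS_PER_SLOT;
    // ...and peers whose clocks run slightly behind.
    let earliest = since_genesis_ms.saturating_sub(MAX_CLOCK_DISPARITY_MS) / MS_PER_SLOT;
    (earliest, latest)
}

fn main() {
    // 300 ms into slot 100: messages for slot 99 are still within tolerance.
    assert_eq!(permissible_slot_window(100 * 12_000 + 300), (99, 100));
    // 600 ms into slot 100: only slot 100 remains acceptable.
    assert_eq!(permissible_slot_window(100 * 12_000 + 600), (100, 100));
}
```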
+/// Verifies all the signatures in a `SignedContributionAndProof` using BLS batch verification. This
+/// includes three signatures:
+///
+/// - `signed_aggregate.signature`
+/// - `signed_aggregate.message.selection_proof`
+/// - `signed_aggregate.message.contribution.signature`
+///
+/// # Returns
+///
+/// - `Ok(true)`: if all signatures are valid.
+/// - `Ok(false)`: if one or more signatures are invalid.
+/// - `Err(e)`: if there was an error preventing signature verification.
+pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
+    chain: &BeaconChain<T>,
+    signed_aggregate: &SignedContributionAndProof<T::EthSpec>,
+    participant_pubkeys: &[PublicKeyBytes],
+) -> Result<bool, Error> {
+    let pubkey_cache = chain
+        .validator_pubkey_cache
+        .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
+        .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
+
+    let aggregator_index = signed_aggregate.message.aggregator_index;
+    if aggregator_index >= pubkey_cache.len() as u64 {
+        return Err(Error::AggregatorPubkeyUnknown(aggregator_index));
+    }
+
+    let next_slot_epoch =
+        (signed_aggregate.message.contribution.slot + 1).epoch(T::EthSpec::slots_per_epoch());
+    let fork = chain.spec.fork_at_epoch(next_slot_epoch);
+
+    let signature_sets = vec![
+        signed_sync_aggregate_selection_proof_signature_set(
+            |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
+            signed_aggregate,
+            &fork,
+            chain.genesis_validators_root,
+            &chain.spec,
+        )
+        .map_err(BeaconChainError::SignatureSetError)?,
+        signed_sync_aggregate_signature_set(
+            |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
+            signed_aggregate,
+            &fork,
+            chain.genesis_validators_root,
+            &chain.spec,
+        )
+        .map_err(BeaconChainError::SignatureSetError)?,
+        sync_committee_contribution_signature_set_from_pubkeys::<T::EthSpec, _>(
+            |validator_index| {
+                pubkey_cache
+                    .get_pubkey_from_pubkey_bytes(validator_index)
+                    .map(Cow::Borrowed)
+            },
+            participant_pubkeys,
+            &signed_aggregate.message.contribution.signature,
+            signed_aggregate
+                .message
+                .contribution
+                .slot
+                .epoch(T::EthSpec::slots_per_epoch()),
+            signed_aggregate.message.contribution.beacon_block_root,
+            &fork,
+            chain.genesis_validators_root,
+            &chain.spec,
+        )
+        .map_err(BeaconChainError::SignatureSetError)?,
+    ];
+
+    Ok(verify_signature_sets(signature_sets.iter()))
+}
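Note the all-or-nothing semantics of `verify_signature_sets`: the three sets go through one batched BLS verification, so a failure reports only `false` without identifying which set was bad. That is acceptable here, since any failure rejects the whole aggregate. A stand-in with the same shape (the trait below is illustrative; the real prepared-set type lives in Lighthouse's `bls` crate):

```rust
/// Illustrative stand-in for a prepared BLS signature set.
trait SignatureSet {
    fn verify(&self) -> bool;
}

/// Naive fallback with the batch API's semantics. A real batch verifier
/// combines all pairings using random scalars, costing far less than `n`
/// independent verifications while still returning a single bool.
fn verify_signature_sets<'a, S, I>(mut sets: I) -> bool
where
    S: SignatureSet + 'a,
    I: Iterator<Item = &'a S>,
{
    sets.all(|set| set.verify())
}
```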
+/// Verifies that the signature of the `sync_message` is valid.
+pub fn verify_sync_committee_message<T: BeaconChainTypes>(
+    chain: &BeaconChain<T>,
+    sync_message: &SyncCommitteeMessage,
+    pubkey_bytes: &PublicKeyBytes,
+) -> Result<(), Error> {
+    let signature_setup_timer =
+        metrics::start_timer(&metrics::SYNC_MESSAGE_PROCESSING_SIGNATURE_SETUP_TIMES);
+
+    let pubkey_cache = chain
+        .validator_pubkey_cache
+        .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
+        .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
+
+    let pubkey = pubkey_cache
+        .get_pubkey_from_pubkey_bytes(pubkey_bytes)
+        .map(Cow::Borrowed)
+        .ok_or_else(|| Error::UnknownValidatorPubkey(*pubkey_bytes))?;
+
+    let next_slot_epoch = (sync_message.get_slot() + 1).epoch(T::EthSpec::slots_per_epoch());
+    let fork = chain.spec.fork_at_epoch(next_slot_epoch);
+
+    let agg_sig = AggregateSignature::from(&sync_message.signature);
+    let signature_set = sync_committee_message_set_from_pubkeys::<T::EthSpec>(
+        pubkey,
+        &agg_sig,
+        sync_message.slot.epoch(T::EthSpec::slots_per_epoch()),
+        sync_message.beacon_block_root,
+        &fork,
+        chain.genesis_validators_root,
+        &chain.spec,
+    )
+    .map_err(BeaconChainError::SignatureSetError)?;
+
+    metrics::stop_timer(signature_setup_timer);
+
+    let _signature_verification_timer =
+        metrics::start_timer(&metrics::SYNC_MESSAGE_PROCESSING_SIGNATURE_TIMES);
+
+    if signature_set.verify() {
+        Ok(())
+    } else {
+        Err(Error::InvalidSignature)
+    }
+}
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs
index 57297b01e68..563ef9d35d6 100644
--- a/beacon_node/beacon_chain/src/test_utils.rs
+++ b/beacon_node/beacon_chain/src/test_utils.rs
@@ -31,17 +31,18 @@ use store::{config::StoreConfig, BlockReplay, HotColdDB, ItemStore, LevelDB, MemoryStore};
 use task_executor::ShutdownReason;
 use tempfile::{tempdir, TempDir};
 use tree_hash::TreeHash;
+use types::sync_selection_proof::SyncSelectionProof;
+pub use types::test_utils::generate_deterministic_keypairs;
 use types::{
     typenum::U4294967296, AggregateSignature, Attestation, AttestationData, AttesterSlashing,
     BeaconBlock, BeaconState, BeaconStateHash, ChainSpec, Checkpoint, Deposit, DepositData, Domain,
     Epoch, EthSpec, ForkName, Graffiti, Hash256, IndexedAttestation, Keypair, ProposerSlashing,
     PublicKeyBytes, SelectionProof, SignatureBytes, SignedAggregateAndProof, SignedBeaconBlock,
-    SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, VariableList,
+    SignedBeaconBlockHash, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, Slot,
+    SubnetId, SyncCommittee, SyncCommitteeContribution, SyncCommitteeMessage, VariableList,
     VoluntaryExit,
 };
-pub use types::test_utils::generate_deterministic_keypairs;
-
 // 4th September 2019
 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
 // This parameter is required by a builder but not used because we use the `TestingSlotClock`.
@@ -87,6 +88,14 @@ pub enum AttestationStrategy {
     SomeValidators(Vec<usize>),
 }
 
+/// Indicates whether the `BeaconChainHarness` should use the `state.current_sync_committee` or
+/// `state.next_sync_committee` when creating sync messages or contributions.
+#[derive(Clone, Debug)]
+pub enum RelativeSyncCommittee {
+    Current,
+    Next,
+}
+
 fn make_rng() -> Mutex<StdRng> {
     // Nondeterminism in tests is a highly undesirable thing. Seed the RNG to some arbitrary
     // but fixed value for reproducibility.
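`RelativeSyncCommittee` exists because duties straddle period boundaries: the committee responsible for a message at slot `s` is the one active at `s + 1` (matching the `(slot + 1).epoch(...)` lookups in the verification code above), so the final slot of a sync committee period is verified against `state.next_sync_committee`. The boundary arithmetic, assuming mainnet presets:

```rust
const SLOTS_PER_EPOCH: u64 = 32;
const EPOCHS_PER_SYNC_COMMITTEE_PERIOD: u64 = 256;

/// The sync committee period containing `slot` (mainnet presets assumed).
fn sync_committee_period(slot: u64) -> u64 {
    (slot / SLOTS_PER_EPOCH) / EPOCHS_PER_SYNC_COMMITTEE_PERIOD
}

/// True when a message for `slot` must be checked against the *next*
/// committee, i.e. when `slot` is the last slot of its period.
fn requires_next_committee(slot: u64) -> bool {
    sync_committee_period(slot + 1) != sync_committee_period(slot)
}

fn main() {
    let last = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1;
    assert!(requires_next_committee(last));
    assert!(!requires_next_committee(last - 1));
}
```

This is also why the tests further down advance to the slot before a period boundary and deliberately build messages with the wrong (current) committee.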
@@ -155,6 +164,11 @@ pub type HarnessAttestations = Vec<( Option>, )>; +pub type HarnessSyncContributions = Vec<( + Vec<(SyncCommitteeMessage, usize)>, + Option>, +)>; + impl BeaconChainHarness> { pub fn new( eth_spec_instance: E, @@ -596,6 +610,57 @@ where .collect() } + /// A list of sync messages for the given state. + pub fn make_sync_committee_messages( + &self, + state: &BeaconState, + head_block_root: Hash256, + message_slot: Slot, + relative_sync_committee: RelativeSyncCommittee, + ) -> Vec> { + let sync_committee: Arc> = match relative_sync_committee { + RelativeSyncCommittee::Current => state + .current_sync_committee() + .expect("should be called on altair beacon state") + .clone(), + RelativeSyncCommittee::Next => state + .next_sync_committee() + .expect("should be called on altair beacon state") + .clone(), + }; + + sync_committee + .pubkeys + .as_ref() + .chunks(E::sync_subcommittee_size()) + .map(|subcommittee| { + subcommittee + .iter() + .enumerate() + .map(|(subcommittee_position, pubkey)| { + let validator_index = self + .chain + .validator_index(pubkey) + .expect("should find validator index") + .expect("pubkey should exist in the beacon chain"); + + let sync_message = SyncCommitteeMessage::new::( + message_slot, + head_block_root, + validator_index as u64, + &self.validator_keypairs[validator_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + + (sync_message, subcommittee_position) + }) + .collect() + }) + .collect() + } + /// Deprecated: Use make_unaggregated_attestations() instead. /// /// A list of attestations for each committee for the given slot. @@ -633,8 +698,8 @@ where slot: Slot, ) -> HarnessAttestations { let unaggregated_attestations = self.make_unaggregated_attestations( - &attesting_validators, - &state, + attesting_validators, + state, state_root, block_hash, slot, @@ -712,6 +777,94 @@ where .collect() } + pub fn make_sync_contributions( + &self, + state: &BeaconState, + block_hash: Hash256, + slot: Slot, + relative_sync_committee: RelativeSyncCommittee, + ) -> HarnessSyncContributions { + let sync_messages = + self.make_sync_committee_messages(state, block_hash, slot, relative_sync_committee); + + let sync_contributions: Vec>> = sync_messages + .iter() + .enumerate() + .map(|(subnet_id, committee_messages)| { + // If there are any sync messages in this committee, create an aggregate. 
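The aggregator search just below wraps `SyncSelectionProof::is_aggregator`, whose spec-side definition (`is_sync_committee_aggregator`) hashes the selection-proof signature and reduces it modulo a target rate. A direct port of that check, assuming mainnet constants and the `sha2` crate:

```rust
use sha2::{Digest, Sha256};
use std::convert::TryInto;

/// Port of the Altair spec's `is_sync_committee_aggregator`, using mainnet
/// constants (512 / 4 / 16 gives a modulus of 8, i.e. roughly one aggregator
/// per eight subcommittee members).
fn is_sync_committee_aggregator(selection_proof_signature: &[u8]) -> bool {
    const SYNC_COMMITTEE_SIZE: u64 = 512;
    const SYNC_COMMITTEE_SUBNET_COUNT: u64 = 4;
    const TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE: u64 = 16;

    let modulo = std::cmp::max(
        1,
        SYNC_COMMITTEE_SIZE / SYNC_COMMITTEE_SUBNET_COUNT
            / TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE,
    );
    // bytes_to_uint64(hash(signature)[0:8]) per the spec: little-endian prefix.
    let digest = Sha256::digest(selection_proof_signature);
    let prefix = u64::from_le_bytes(digest[0..8].try_into().expect("slice is 8 bytes"));
    prefix % modulo == 0
}
```

The harness simply signs a selection proof for each subcommittee member until this predicate holds, and `get_non_aggregator` in the tests below inverts the same search to find a validator for the negative case.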
+ if let Some((sync_message, subcommittee_position)) = committee_messages.first() { + let sync_committee: Arc> = state + .current_sync_committee() + .expect("should be called on altair beacon state") + .clone(); + + let aggregator_index = sync_committee + .get_subcommittee_pubkeys(subnet_id) + .unwrap() + .iter() + .find_map(|pubkey| { + let validator_index = self + .chain + .validator_index(pubkey) + .expect("should find validator index") + .expect("pubkey should exist in the beacon chain"); + + let selection_proof = SyncSelectionProof::new::( + slot, + subnet_id as u64, + &self.validator_keypairs[validator_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + + selection_proof + .is_aggregator::() + .expect("should determine aggregator") + .then(|| validator_index) + })?; + + let default = SyncCommitteeContribution::from_message( + sync_message, + subnet_id as u64, + *subcommittee_position, + ) + .expect("should derive sync contribution"); + + let aggregate = committee_messages.iter().skip(1).fold( + default, + |mut agg, (sig, position)| { + let contribution = SyncCommitteeContribution::from_message( + sig, + subnet_id as u64, + *position, + ) + .expect("should derive sync contribution"); + agg.aggregate(&contribution); + agg + }, + ); + + let signed_aggregate = SignedContributionAndProof::from_aggregate( + aggregator_index as u64, + aggregate, + None, + &self.validator_keypairs[aggregator_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + + Some(signed_aggregate) + } else { + None + } + }) + .collect(); + + sync_messages.into_iter().zip(sync_contributions).collect() + } + pub fn make_attester_slashing(&self, validator_indices: Vec) -> AttesterSlashing { let mut attestation_1 = IndexedAttestation { attesting_indices: VariableList::new(validator_indices).unwrap(), @@ -836,7 +989,7 @@ where let mut signed_block_headers = vec![block_header_1, block_header_2] .into_iter() .map(|block_header| { - block_header.sign::(&sk, &fork, genesis_validators_root, &self.chain.spec) + block_header.sign::(sk, &fork, genesis_validators_root, &self.chain.spec) }) .collect::>(); @@ -1043,7 +1196,7 @@ where validators: &[usize], ) { let attestations = - self.make_attestations(validators, &state, state_root, block_hash, block.slot()); + self.make_attestations(validators, state, state_root, block_hash, block.slot()); self.process_attestations(attestations); } diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 3022dd5c92e..a1879060aad 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -6,7 +6,9 @@ use crate::metrics; use parking_lot::RwLock; use slog::{crit, error, info, warn, Logger}; use slot_clock::SlotClock; -use state_processing::per_epoch_processing::ValidatorStatus; +use state_processing::per_epoch_processing::{ + errors::EpochProcessingError, EpochProcessingSummary, +}; use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::io; @@ -124,8 +126,6 @@ type SummaryMap = HashMap; struct MonitoredValidator { /// A human-readable identifier for the validator. pub id: String, - /// The validator voting pubkey. - pub pubkey: PublicKeyBytes, /// The validator index in the state. pub index: Option, /// A history of the validator over time. 
@@ -138,7 +138,6 @@ impl MonitoredValidator { id: index .map(|i| i.to_string()) .unwrap_or_else(|| pubkey.to_string()), - pubkey, index, summaries: <_>::default(), } @@ -326,7 +325,12 @@ impl ValidatorMonitor { } } - pub fn process_validator_statuses(&self, epoch: Epoch, summaries: &[ValidatorStatus]) { + pub fn process_validator_statuses( + &self, + epoch: Epoch, + summary: &EpochProcessingSummary, + spec: &ChainSpec, + ) -> Result<(), EpochProcessingError> { for monitored_validator in self.validators.values() { // We subtract two from the state of the epoch that generated these summaries. // @@ -338,93 +342,123 @@ impl ValidatorMonitor { let i = i as usize; let id = &monitored_validator.id; - if let Some(summary) = summaries.get(i) { - if summary.is_previous_epoch_attester { - let lag = summary - .inclusion_info - .map(|i| format!("{} slot(s)", i.delay.saturating_sub(1).to_string())) - .unwrap_or_else(|| "??".to_string()); + /* + * These metrics are reflected differently between Base and Altair. + * + * For Base, any attestation that is included on-chain will match the source. + * + * However, in Altair, only attestations that are "timely" are registered as + * matching the source. + */ + + let previous_epoch_active = summary.is_active_unslashed_in_previous_epoch(i); + let previous_epoch_matched_source = summary.is_previous_epoch_source_attester(i)?; + let previous_epoch_matched_target = summary.is_previous_epoch_target_attester(i)?; + let previous_epoch_matched_head = summary.is_previous_epoch_head_attester(i)?; + let previous_epoch_matched_any = previous_epoch_matched_source + || previous_epoch_matched_target + || previous_epoch_matched_head; + + if !previous_epoch_active { + // Monitored validator is not active, due to awaiting activation + // or being exited/withdrawn. Do not attempt to report on its + // attestations. + continue; + } - info!( - self.log, - "Previous epoch attestation success"; - "inclusion_lag" => lag, - "matched_target" => summary.is_previous_epoch_target_attester, - "matched_head" => summary.is_previous_epoch_head_attester, - "epoch" => prev_epoch, - "validator" => id, - ); - } else if summary.is_active_in_previous_epoch - && !summary.is_previous_epoch_attester - { - error!( - self.log, - "Previous epoch attestation missing"; - "epoch" => prev_epoch, - "validator" => id, - ) - } else if !summary.is_active_in_previous_epoch { - // Monitored validator is not active, due to awaiting activation - // or being exited/withdrawn. Do not attempt to report on its - // attestations. - continue; - } + // Indicates if any attestation made it on-chain. + // + // For Base states, this will be *any* attestation whatsoever. For Altair states, + // this will be any attestation that matched a "timely" flag. 
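The `matched_source`/`matched_target`/`matched_head` trio corresponds to Altair's per-validator participation flags (`TIMELY_SOURCE`, `TIMELY_TARGET`, `TIMELY_HEAD`), which is why "made it on-chain" is a stricter notion post-Altair. A minimal sketch of reading those flags (flag indices per the Altair spec; the single-byte layout mirrors `ParticipationFlags`):

```rust
const TIMELY_SOURCE_FLAG_INDEX: u8 = 0;
const TIMELY_TARGET_FLAG_INDEX: u8 = 1;
const TIMELY_HEAD_FLAG_INDEX: u8 = 2;

fn has_flag(participation_flags: u8, flag_index: u8) -> bool {
    let flag = 1u8 << flag_index;
    participation_flags & flag == flag
}

/// Equivalent of `previous_epoch_matched_any` for an Altair state: any timely
/// flag set means some attestation by this validator made it on-chain.
fn matched_any(participation_flags: u8) -> bool {
    has_flag(participation_flags, TIMELY_SOURCE_FLAG_INDEX)
        || has_flag(participation_flags, TIMELY_TARGET_FLAG_INDEX)
        || has_flag(participation_flags, TIMELY_HEAD_FLAG_INDEX)
}
```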
+ if previous_epoch_matched_any { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, + &[id], + ); + info!( + self.log, + "Previous epoch attestation success"; + "matched_source" => previous_epoch_matched_source, + "matched_target" => previous_epoch_matched_target, + "matched_head" => previous_epoch_matched_head, + "epoch" => prev_epoch, + "validator" => id, + ) + } else { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, + &[id], + ); + error!( + self.log, + "Previous epoch attestation missing"; + "epoch" => prev_epoch, + "validator" => id, + ) + } - if summary.is_previous_epoch_attester { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, - &[id], - ); - } else { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, - &[id], - ); - } - if summary.is_previous_epoch_head_attester { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, - &[id], - ); - } else { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, - &[id], - ); - warn!( - self.log, - "Attested to an incorrect head"; - "epoch" => prev_epoch, - "validator" => id, - ); - } - if summary.is_previous_epoch_target_attester { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, - &[id], - ); - } else { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, - &[id], - ); + // Indicates if any on-chain attestation hit the head. + if previous_epoch_matched_head { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, + &[id], + ); + } else { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, + &[id], + ); + warn!( + self.log, + "Attestation failed to match head"; + "epoch" => prev_epoch, + "validator" => id, + ); + } + + // Indicates if any on-chain attestation hit the target. + if previous_epoch_matched_target { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, + &[id], + ); + } else { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, + &[id], + ); + warn!( + self.log, + "Attestation failed to match target"; + "epoch" => prev_epoch, + "validator" => id, + ); + } + + // For pre-Altair, state the inclusion distance. This information is not retained in + // the Altair state. 
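The delay comparison below uses `min_attestation_inclusion_delay`, which is 1 slot on mainnet: an attestation for slot `s` can be included at `s + 1` at the earliest, so anything above the minimum suggests slow propagation or skipped proposers. A worked check (mainnet value assumed):

```rust
const MIN_ATTESTATION_INCLUSION_DELAY: u64 = 1; // mainnet

/// Inclusion delay measured in slots between attestation and inclusion.
fn inclusion_delay(attestation_slot: u64, inclusion_slot: u64) -> u64 {
    inclusion_slot.saturating_sub(attestation_slot)
}

fn is_suboptimal(delay: u64) -> bool {
    delay > MIN_ATTESTATION_INCLUSION_DELAY
}

fn main() {
    assert!(!is_suboptimal(inclusion_delay(100, 101))); // optimal
    assert!(is_suboptimal(inclusion_delay(100, 103))); // warned about below
}
```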
+ if let Some(inclusion_info) = summary.previous_epoch_inclusion_info(i) { + if inclusion_info.delay > spec.min_attestation_inclusion_delay { warn!( self.log, - "Attested to an incorrect target"; + "Sub-optimal inclusion delay"; + "optimal" => spec.min_attestation_inclusion_delay, + "delay" => inclusion_info.delay, "epoch" => prev_epoch, "validator" => id, ); } - if let Some(inclusion_info) = summary.inclusion_info { - metrics::set_int_gauge( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE, - &[id], - inclusion_info.delay as i64, - ); - } + + metrics::set_int_gauge( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE, + &[id], + inclusion_info.delay as i64, + ); } } } + + Ok(()) } fn get_validator_id(&self, validator_index: u64) -> Option<&str> { diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index be0ac7b93fc..8b27d89e097 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -173,6 +173,13 @@ impl ValidatorPubkeyCache { self.pubkeys.get(i) } + /// Get the `PublicKey` for a validator with `PublicKeyBytes`. + pub fn get_pubkey_from_pubkey_bytes(&self, pubkey: &PublicKeyBytes) -> Option<&PublicKey> { + self.get_index(pubkey) + .map(|index| self.get(index)) + .flatten() + } + /// Get the public key (in bytes form) for a validator with index `i`. pub fn get_pubkey_bytes(&self, i: usize) -> Option<&PublicKeyBytes> { self.pubkey_bytes.get(i) diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs new file mode 100644 index 00000000000..beeea37e622 --- /dev/null +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -0,0 +1,668 @@ +#![cfg(not(debug_assertions))] + +#[macro_use] +extern crate lazy_static; + +use beacon_chain::sync_committee_verification::Error as SyncCommitteeError; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; +use int_to_bytes::int_to_bytes32; +use safe_arith::SafeArith; +use store::{SignedContributionAndProof, SyncCommitteeMessage}; +use tree_hash::TreeHash; +use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use types::{ + AggregateSignature, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, Slot, + SyncSelectionProof, SyncSubnetId, Unsigned, +}; + +pub type E = MainnetEthSpec; + +pub const VALIDATOR_COUNT: usize = 256; + +lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); +} + +/// Returns a beacon chain harness. +fn get_harness(validator_count: usize) -> BeaconChainHarness> { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + let harness = BeaconChainHarness::new( + MainnetEthSpec, + Some(spec), + KEYPAIRS[0..validator_count].to_vec(), + ); + + harness.advance_slot(); + + harness +} + +/// Returns a sync message that is valid for some slot in the given `chain`. +/// +/// Also returns some info about who created it. 
+fn get_valid_sync_committee_message( + harness: &BeaconChainHarness>, + slot: Slot, + relative_sync_committee: RelativeSyncCommittee, +) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { + let head_state = harness + .chain + .head_beacon_state() + .expect("should get head state"); + let head_block_root = harness + .chain + .head() + .expect("should get head state") + .beacon_block_root; + let (signature, _) = harness + .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) + .get(0) + .expect("sync messages should exist") + .get(0) + .expect("first sync message should exist") + .clone(); + + ( + signature.clone(), + signature.validator_index as usize, + harness.validator_keypairs[signature.validator_index as usize] + .sk + .clone(), + SyncSubnetId::new(0), + ) +} + +fn get_valid_sync_contribution( + harness: &BeaconChainHarness>, + relative_sync_committee: RelativeSyncCommittee, +) -> (SignedContributionAndProof, usize, SecretKey) { + let head_state = harness + .chain + .head_beacon_state() + .expect("should get head state"); + + let head_block_root = harness + .chain + .head() + .expect("should get head state") + .beacon_block_root; + let sync_contributions = harness.make_sync_contributions( + &head_state, + head_block_root, + head_state.slot(), + relative_sync_committee, + ); + + let (_, contribution_opt) = sync_contributions + .get(0) + .expect("sync contributions should exist"); + let contribution = contribution_opt + .as_ref() + .cloned() + .expect("signed contribution and proof should exist"); + + let aggregator_index = contribution.message.aggregator_index as usize; + + ( + contribution, + aggregator_index, + harness.validator_keypairs[aggregator_index].sk.clone(), + ) +} + +/// Returns a proof and index for a validator that is **not** an aggregator for the current sync period. +fn get_non_aggregator( + harness: &BeaconChainHarness>, + slot: Slot, +) -> (usize, SecretKey) { + let state = &harness.chain.head().expect("should get head").beacon_state; + let sync_subcommittee_size = E::sync_committee_size() + .safe_div(SYNC_COMMITTEE_SUBNET_COUNT as usize) + .expect("should determine sync subcommittee size"); + let sync_committee = state + .current_sync_committee() + .expect("should use altair state") + .clone(); + let non_aggregator_index = sync_committee + .pubkeys + .chunks(sync_subcommittee_size) + .enumerate() + .find_map(|(subcommittee_index, subcommittee)| { + subcommittee.iter().find_map(|pubkey| { + let validator_index = harness + .chain + .validator_index(&pubkey) + .expect("should get validator index") + .expect("pubkey should exist in beacon chain"); + + let selection_proof = SyncSelectionProof::new::( + slot, + subcommittee_index as u64, + &harness.validator_keypairs[validator_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + ); + + if !selection_proof + .is_aggregator::() + .expect("should determine aggregator") + { + Some(validator_index) + } else { + None + } + }) + }) + .expect("should find at least one non-aggregator"); + + let aggregator_sk = harness.validator_keypairs[non_aggregator_index].sk.clone(); + (non_aggregator_index, aggregator_sk) +} + +/// Tests verification of `SignedContributionAndProof` from the gossip network. 
+#[test] +fn aggregated_gossip_verification() { + let harness = get_harness(VALIDATOR_COUNT); + let state = harness.get_current_state(); + + harness.add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1), Slot::new(2)], + (0..VALIDATOR_COUNT).collect::>().as_slice(), + ); + + let current_slot = harness.chain.slot().expect("should get slot"); + + let (valid_aggregate, aggregator_index, aggregator_sk) = + get_valid_sync_contribution(&harness, RelativeSyncCommittee::Current); + + macro_rules! assert_invalid { + ($desc: tt, $attn_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => { + assert!( + matches!( + harness + .chain + .verify_sync_contribution_for_gossip($attn_getter) + .err() + .expect(&format!( + "{} should error during verify_sync_contribution_for_gossip", + $desc + )), + $( $error ) |+ $( if $guard )? + ), + "case: {}", + $desc, + ); + }; + } + + /* + * The following two tests ensure: + * + * The contribution's slot is for the current slot, i.e. contribution.slot == current_slot + * (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). + */ + + let future_slot = current_slot + 1; + assert_invalid!( + "aggregate from future slot", + { + let mut a = valid_aggregate.clone(); + a.message.contribution.slot = future_slot; + a + }, + SyncCommitteeError::FutureSlot { message_slot, latest_permissible_slot } + if message_slot == future_slot && latest_permissible_slot == current_slot + ); + + let early_slot = current_slot + .as_u64() + // Subtract an additional slot since the harness will be exactly on the start of the + // slot and the propagation tolerance will allow an extra slot. + .checked_sub(2) + .expect("chain is not sufficiently deep for test") + .into(); + assert_invalid!( + "aggregate from past slot", + { + let mut a = valid_aggregate.clone(); + a.message.contribution.slot = early_slot; + a + }, + SyncCommitteeError::PastSlot { + message_slot, + + earliest_permissible_slot + } + if message_slot == early_slot + && earliest_permissible_slot == current_slot - 1 + ); + + /* + * The following test ensures: + * + * The subcommittee index is in the allowed range, + * i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`. + */ + + assert_invalid!( + "subcommittee index out of range", + { + let mut a = valid_aggregate.clone(); + a.message.contribution.subcommittee_index = SYNC_COMMITTEE_SUBNET_COUNT; + a + }, + SyncCommitteeError::InvalidSubcommittee { + subcommittee_index, + subcommittee_size, + } + if subcommittee_index == SYNC_COMMITTEE_SUBNET_COUNT && subcommittee_size == SYNC_COMMITTEE_SUBNET_COUNT + + ); + + /* + * The following test ensures: + * + * The sync contribution has participants. + */ + + assert_invalid!( + "aggregate with no participants", + { + let mut a = valid_aggregate.clone(); + let aggregation_bits = &mut a.message.contribution.aggregation_bits; + aggregation_bits.difference_inplace(&aggregation_bits.clone()); + assert!(aggregation_bits.is_zero()); + a.message.contribution.signature = AggregateSignature::infinity(); + a + }, + SyncCommitteeError::EmptyAggregationBitfield + ); + + /* + * This test ensures: + * + * The aggregator signature, signed_contribution_and_proof.signature, is valid. 
+ */ + + assert_invalid!( + "aggregate with bad signature", + { + let mut a = valid_aggregate.clone(); + + a.signature = aggregator_sk.sign(Hash256::from_low_u64_be(42)); + + a + }, + SyncCommitteeError::InvalidSignature + ); + + /* + * The following test ensures: + * + * The contribution_and_proof.selection_proof is a valid signature of the `SyncAggregatorSelectionData` + * derived from the contribution by the validator with index `contribution_and_proof.aggregator_index`. + */ + + assert_invalid!( + "aggregate with bad selection proof signature", + { + let mut a = valid_aggregate.clone(); + + // Generate some random signature until happens to be a valid selection proof. We need + // this in order to reach the signature verification code. + // + // Could run for ever, but that seems _really_ improbable. + let mut i: u64 = 0; + a.message.selection_proof = loop { + i += 1; + let proof: SyncSelectionProof = aggregator_sk + .sign(Hash256::from_slice(&int_to_bytes32(i))) + .into(); + if proof + .is_aggregator::() + .expect("should determine aggregator") + { + break proof.into(); + } + }; + + a + }, + SyncCommitteeError::InvalidSignature + ); + + /* + * The following test ensures: + * + * The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey + * derived from the participation info in `aggregation_bits` for the subcommittee specified by + * the `contribution.subcommittee_index`. + */ + + assert_invalid!( + "aggregate with bad aggregate signature", + { + let mut a = valid_aggregate.clone(); + + let mut agg_sig = AggregateSignature::infinity(); + agg_sig.add_assign(&aggregator_sk.sign(Hash256::from_low_u64_be(42))); + a.message.contribution.signature = agg_sig; + + a + }, + SyncCommitteeError::InvalidSignature + ); + + let too_high_index = ::ValidatorRegistryLimit::to_u64() + 1; + assert_invalid!( + "aggregate with too-high aggregator index", + { + let mut a = valid_aggregate.clone(); + a.message.aggregator_index = too_high_index; + a + }, + SyncCommitteeError::UnknownValidatorIndex(index) + if index == too_high_index as usize + ); + + /* + * The following test ensures: + * + * The aggregator's validator index is in the declared subcommittee of the current sync + * committee -- i.e. state.validators[contribution_and_proof.aggregator_index].pubkey in + * get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index). + */ + + assert_invalid!( + "aggregate with unknown aggregator index", + { + let mut a = valid_aggregate.clone(); + a.message.contribution.subcommittee_index +=1; + a + }, + SyncCommitteeError::AggregatorNotInCommittee { + aggregator_index + } + if aggregator_index == valid_aggregate.message.aggregator_index as u64 + ); + + /* + * The following test ensures: + * + * `contribution_and_proof.selection_proof` selects the validator as an aggregator for the + * slot -- i.e. is_sync_committee_aggregator(contribution_and_proof.selection_proof) returns True. 
+ */ + + let (non_aggregator_index, non_aggregator_sk) = get_non_aggregator(&harness, current_slot); + assert_invalid!( + "aggregate from non-aggregator", + { + SignedContributionAndProof::from_aggregate( + non_aggregator_index as u64, + valid_aggregate.message.contribution.clone(), + None, + &non_aggregator_sk, + &harness.chain.head_info().expect("should get head info").fork, + harness.chain.genesis_validators_root, + &harness.chain.spec, + ) + }, + SyncCommitteeError::InvalidSelectionProof { + aggregator_index: index + } + if index == non_aggregator_index as u64 + ); + + // NOTE: from here on, the tests are stateful, and rely on the valid sync contribution having been + // seen. A refactor to give each test case its own state might be nice at some point + harness + .chain + .verify_sync_contribution_for_gossip(valid_aggregate.clone()) + .expect("should verify sync contribution"); + + /* + * The following test ensures: + * + * The sync committee contribution is the first valid contribution received for the aggregator + * with index contribution_and_proof.aggregator_index for the slot contribution.slot and + * subcommittee index contribution.subcommittee_index. + */ + + assert_invalid!( + "aggregate that has already been seen", + valid_aggregate.clone(), + SyncCommitteeError::SyncContributionAlreadyKnown(hash) + if hash == valid_aggregate.message.contribution.tree_hash_root() + ); + + /* + * The following test ensures: + * + * The sync committee contribution is the first valid contribution received for the aggregator + * with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and + * subcommittee index `contribution.subcommittee_index`. + */ + + assert_invalid!( + "aggregate from aggregator and subcommittee that has already been seen", + { + let mut a = valid_aggregate; + a.message.contribution.beacon_block_root = Hash256::from_low_u64_le(42); + a + }, + SyncCommitteeError::AggregatorAlreadyKnown(index) + if index == aggregator_index as u64 + ); + + /* + * The following test ensures that: + * + * A sync committee contribution for the slot before the sync committee period boundary is verified + * using the `head_state.next_sync_committee`. + */ + + // Advance to the slot before the 3rd sync committee period because `current_sync_committee = next_sync_committee` + // at genesis. + let state = harness.get_current_state(); + let target_slot = Slot::new( + (2 * harness.spec.epochs_per_sync_committee_period.as_u64() * E::slots_per_epoch()) - 1, + ); + + harness + .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[]) + .expect("should add block"); + + // **Incorrectly** create a sync contribution using the current sync committee + let (next_valid_contribution, _, _) = + get_valid_sync_contribution(&harness, RelativeSyncCommittee::Current); + + assert_invalid!( + "sync contribution created with incorrect sync committee", + next_valid_contribution.clone(), + SyncCommitteeError::InvalidSignature | SyncCommitteeError::AggregatorNotInCommittee { .. } + ); +} + +/// Tests the verification conditions for sync committee messages on the gossip network. 
+#[test] +fn unaggregated_gossip_verification() { + let harness = get_harness(VALIDATOR_COUNT); + let state = harness.get_current_state(); + + harness.add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1), Slot::new(2)], + (0..VALIDATOR_COUNT).collect::>().as_slice(), + ); + + let current_slot = harness.chain.slot().expect("should get slot"); + + let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) = + get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current); + + macro_rules! assert_invalid { + ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => { + assert!( + matches!( + harness + .chain + .verify_sync_committee_message_for_gossip($attn_getter, $subnet_getter) + .err() + .expect(&format!( + "{} should error during verify_sync_committee_message_for_gossip", + $desc + )), + $( $error ) |+ $( if $guard )? + ), + "case: {}", + $desc, + ); + }; + } + + /* + * The following test ensures: + * + * The subnet_id is valid for the given validator, i.e. subnet_id in + * compute_subnets_for_sync_committee(state, sync_committee_message.validator_index). + */ + let id: u64 = subnet_id.into(); + let invalid_subnet_id = SyncSubnetId::new(id + 1); + assert_invalid!( + "invalid subnet id", + { + valid_sync_committee_message.clone() + }, + invalid_subnet_id, + SyncCommitteeError::InvalidSubnetId { + received, + expected, + } + if received == invalid_subnet_id && expected.contains(&subnet_id) + ); + + /* + * The following two tests ensure: + * + * This signature is within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance from the current slot. + */ + + let future_slot = current_slot + 1; + assert_invalid!( + "sync message from future slot", + { + let mut signature = valid_sync_committee_message.clone(); + signature.slot = future_slot; + signature + }, + subnet_id, + SyncCommitteeError::FutureSlot { + message_slot, + latest_permissible_slot, + } + if message_slot == future_slot && latest_permissible_slot == current_slot + ); + + // Subtract an additional slot since the harness will be exactly on the start of the + // slot and the propagation tolerance will allow an extra slot. + let early_slot = current_slot + .as_u64() + .checked_sub(2) + .expect("chain is not sufficiently deep for test") + .into(); + assert_invalid!( + "sync message from past slot", + { + let mut signature = valid_sync_committee_message.clone(); + signature.slot = early_slot; + signature + }, + subnet_id, + SyncCommitteeError::PastSlot { + message_slot, + + earliest_permissible_slot, + } + if message_slot == early_slot && earliest_permissible_slot == current_slot - 1 + ); + + /* + * The following test ensures that: + * + * The signature is valid for the message beacon_block_root for the validator referenced by + * validator_index. + */ + assert_invalid!( + "sync message with bad signature", + { + let mut sync_message = valid_sync_committee_message.clone(); + + sync_message.signature = validator_sk.sign(Hash256::from_low_u64_le(424242)); + + sync_message + }, + subnet_id, + SyncCommitteeError::InvalidSignature + ); + + harness + .chain + .verify_sync_committee_message_for_gossip(valid_sync_committee_message.clone(), subnet_id) + .expect("valid sync message should be verified"); + + /* + * The following test ensures that: + * + * There has been no other valid sync committee message for the declared slot for the + * validator referenced by sync_committee_message.validator_index. 
+ */ + assert_invalid!( + "sync message that has already been seen", + valid_sync_committee_message, + subnet_id, + SyncCommitteeError::PriorSyncCommitteeMessageKnown { + validator_index, + slot, + } + if validator_index == expected_validator_index as u64 && slot == current_slot + ); + + /* + * The following test ensures that: + * + * A sync committee message for the slot before the sync committee period boundary is verified + * using the `head_state.next_sync_committee`. + */ + + // Advance to the slot before the 3rd sync committee period because `current_sync_committee = next_sync_committee` + // at genesis. + let state = harness.get_current_state(); + let target_slot = Slot::new( + (2 * harness.spec.epochs_per_sync_committee_period.as_u64() * E::slots_per_epoch()) - 1, + ); + + harness + .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[]) + .expect("should add block"); + + // **Incorrectly** create a sync message using the current sync committee + let (next_valid_sync_committee_message, _, _, next_subnet_id) = + get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current); + + assert_invalid!( + "sync message on incorrect subnet", + next_valid_sync_committee_message.clone(), + next_subnet_id, + SyncCommitteeError::InvalidSubnetId { + received, + expected, + } + if received == subnet_id && !expected.contains(&subnet_id) + ); +} diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 2740d566a85..497381bbf12 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -436,7 +436,8 @@ fn roundtrip_operation_pool() { .get_item::>(&OP_POOL_DB_KEY) .expect("should read db") .expect("should find op pool") - .into_operation_pool(); + .into_operation_pool() + .unwrap(); assert_eq!(harness.chain.op_pool, restored_op_pool); } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3e67624461d..b9aa725ad74 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -26,7 +26,7 @@ error-chain = "0.12.4" serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace"] } slog-async = "2.5.0" -tokio = "1.1.0" +tokio = "1.7.1" dirs = "3.0.1" futures = "0.3.7" reqwest = { version = "0.11.0", features = ["native-tls-vendored"] } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index aaa8e619ce7..82ac3a30465 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -228,7 +228,6 @@ fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger warn!( log, "Syncing eth1 block cache"; - "msg" => "sync can take longer when using remote eth1 nodes", "est_blocks_remaining" => distance, ); } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 8b80eee1098..730af0bf503 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -26,7 +26,7 @@ tree_hash = "0.1.1" eth2_hashing = "0.1.0" parking_lot = "0.11.0" slog = "2.5.2" -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } libflate = "1.0.2" lighthouse_metrics = { path = "../../common/lighthouse_metrics"} diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index 5b6ffec1c78..7c67893fb34 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -12,7 +12,7 @@ pub enum Error { 
/// Logs have to be added with monotonically-increasing block numbers. NonConsecutive { log_index: u64, expected: usize }, /// The eth1 event log data was unable to be parsed. - LogParseError(String), + LogParse(String), /// There are insufficient deposits in the cache to fulfil the request. InsufficientDeposits { known_deposits: usize, @@ -26,9 +26,9 @@ pub enum Error { /// E.g., you cannot request deposit 10 when the deposit count is 9. DepositCountInvalid { deposit_count: u64, range_end: u64 }, /// Error with the merkle tree for deposits. - DepositTreeError(merkle_proof::MerkleTreeError), + DepositTree(merkle_proof::MerkleTreeError), /// An unexpected condition was encountered. - InternalError(String), + Internal(String), } #[derive(Encode, Decode, Clone)] @@ -160,7 +160,7 @@ impl DepositCache { self.logs.push(log); self.deposit_tree .push_leaf(deposit) - .map_err(Error::DepositTreeError)?; + .map_err(Error::DepositTree)?; self.deposit_roots.push(self.deposit_tree.root()); Ok(DepositCacheInsertOutcome::Inserted) } @@ -219,7 +219,7 @@ impl DepositCache { let leaves = self .leaves .get(0..deposit_count as usize) - .ok_or_else(|| Error::InternalError("Unable to get known leaves".into()))?; + .ok_or_else(|| Error::Internal("Unable to get known leaves".into()))?; // Note: there is likely a more optimal solution than recreating the `DepositDataTree` // each time this function is called. @@ -233,7 +233,7 @@ impl DepositCache { let deposits = self .logs .get(start as usize..end as usize) - .ok_or_else(|| Error::InternalError("Unable to get known log".into()))? + .ok_or_else(|| Error::Internal("Unable to get known log".into()))? .iter() .map(|deposit_log| { let (_leaf, proof) = tree.generate_proof(deposit_log.index as usize); diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index af628c3658c..9e3465f0fa4 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -378,7 +378,7 @@ pub async fn get_deposit_logs_in_range( .ok_or("Data was not string")?; Ok(Log { - block_number: hex_to_u64_be(&block_number)?, + block_number: hex_to_u64_be(block_number)?, data: hex_to_bytes(data)?, }) }) @@ -446,7 +446,7 @@ pub async fn send_rpc_request( /// Accepts an entire HTTP body (as a string) and returns either the `result` field or the `error['message']` field, as a serde `Value`. fn response_result_or_error(response: &str) -> Result { - let json = serde_json::from_str::(&response) + let json = serde_json::from_str::(response) .map_err(|e| RpcError::InvalidJson(e.to_string()))?; if let Some(error) = json.get("error").map(|e| e.get("message")).flatten() { diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 2dc39a1de92..15a3aefa783 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -48,7 +48,7 @@ impl Inner { /// Encode the eth1 block and deposit cache as bytes. pub fn as_bytes(&self) -> Vec { - let ssz_eth1_cache = SszEth1Cache::from_inner(&self); + let ssz_eth1_cache = SszEth1Cache::from_inner(self); ssz_eth1_cache.as_ssz_bytes() } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index a8964ada7c5..ab00ba07098 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -694,8 +694,8 @@ impl Service { { crit!( self.log, - "Couldn't connect to any eth1 node. Please ensure that you have an \ - eth1 http server running locally on http://localhost:8545 or specify \ + "Could not connect to a suitable eth1 node. 
Please ensure that you have \ + an eth1 http server running locally on http://localhost:8545 or specify \ one or more (remote) endpoints using \ `--eth1-endpoints `. \ Also ensure that `eth` and `net` apis are enabled on the eth1 http \ @@ -705,7 +705,7 @@ impl Service { } } } - endpoints.fallback.map_format_error(|s| &s.endpoint, &e) + endpoints.fallback.map_format_error(|s| &s.endpoint, e) }; let process_err = |e: Error| match &e { @@ -716,7 +716,7 @@ impl Service { let (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache) = endpoints .first_success(|e| async move { - get_remote_head_and_new_block_ranges(e, &self, node_far_behind_seconds).await + get_remote_head_and_new_block_ranges(e, self, node_far_behind_seconds).await }) .await .map_err(|e| { @@ -881,7 +881,7 @@ impl Service { Some(range) => range, None => endpoints .first_success(|e| async move { - relevant_new_block_numbers_from_endpoint(e, &self, HeadType::Deposit).await + relevant_new_block_numbers_from_endpoint(e, self, HeadType::Deposit).await }) .await .map_err(Error::FallbackError)?, @@ -922,7 +922,7 @@ impl Service { .first_success(|e| async move { get_deposit_logs_in_range( e, - &deposit_contract_address_ref, + deposit_contract_address_ref, block_range_ref.clone(), Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), ) @@ -1034,7 +1034,7 @@ impl Service { Some(range) => range, None => endpoints .first_success(|e| async move { - relevant_new_block_numbers_from_endpoint(e, &self, HeadType::BlockCache) + relevant_new_block_numbers_from_endpoint(e, self, HeadType::BlockCache) .await }) .await diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index 662f2944427..393ce5fdcc5 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ b/beacon_node/eth2_libp2p/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -discv5 = { version = "0.1.0-beta.5", features = ["libp2p"] } +discv5 = { version = "0.1.0-beta.8", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } hashset_delay = { path = "../../common/hashset_delay" } @@ -16,7 +16,7 @@ eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } -tokio = { version = "1.1.0", features = ["time", "macros"] } +tokio = { version = "1.7.1", features = ["time", "macros"] } futures = "0.3.7" futures-io = "0.3.7" error-chain = "0.12.4" @@ -42,12 +42,14 @@ regex = "1.3.9" strum = { version = "0.20", features = ["derive"] } [dependencies.libp2p] -version = "0.35.1" -default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"] +#version = "0.39.1" +#default-features = false +git = "https://github.com/sigp/rust-libp2p" +rev = "323cae1d08112052740834aa1fb262ae43e6f783" +features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio"] [dev-dependencies] -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } slog-term = "2.6.0" slog-async = "2.5.0" tempfile = "3.1.0" diff --git a/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs b/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs index 80313938c1d..71a3953ece1 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs +++ 
b/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs @@ -17,6 +17,23 @@ const VOLUNTARY_EXIT_WEIGHT: f64 = 0.05; const PROPOSER_SLASHING_WEIGHT: f64 = 0.05; const ATTESTER_SLASHING_WEIGHT: f64 = 0.05; +/// The time window (seconds) that we expect messages to be forwarded to us in the mesh. +const MESH_MESSAGE_DELIVERIES_WINDOW: u64 = 2; + +// Const as this is used in the peer manager to prevent gossip from disconnecting peers. +pub const GREYLIST_THRESHOLD: f64 = -16000.0; + +/// Builds the peer score thresholds. +pub fn lighthouse_gossip_thresholds() -> PeerScoreThresholds { + PeerScoreThresholds { + gossip_threshold: -4000.0, + publish_threshold: -8000.0, + graylist_threshold: GREYLIST_THRESHOLD, + accept_px_threshold: 100.0, + opportunistic_graft_threshold: 5.0, + } +} + pub struct PeerScoreSettings { slot: Duration, epoch: Duration, @@ -75,7 +92,7 @@ impl PeerScoreSettings { decay_to_zero: self.decay_to_zero, retain_score: self.epoch * 100, app_specific_weight: 1.0, - ip_colocation_factor_threshold: 3.0, + ip_colocation_factor_threshold: 8.0, // Allow up to 8 nodes per IP behaviour_penalty_threshold: 6.0, behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10), ..Default::default() @@ -313,10 +330,10 @@ impl PeerScoreSettings { cap_factor * t_params.mesh_message_deliveries_threshold }; t_params.mesh_message_deliveries_activation = activation_window; - t_params.mesh_message_deliveries_window = Duration::from_secs(2); + t_params.mesh_message_deliveries_window = + Duration::from_secs(MESH_MESSAGE_DELIVERIES_WINDOW); t_params.mesh_failure_penalty_decay = t_params.mesh_message_deliveries_decay; - t_params.mesh_message_deliveries_weight = -self.max_positive_score - / (t_params.topic_weight * t_params.mesh_message_deliveries_threshold.powi(2)); + t_params.mesh_message_deliveries_weight = -t_params.topic_weight; t_params.mesh_failure_penalty_weight = t_params.mesh_message_deliveries_weight; if decay_slots >= current_slot.as_u64() { t_params.mesh_message_deliveries_threshold = 0.0; diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs deleted file mode 100644 index f849114f311..00000000000 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs +++ /dev/null @@ -1,368 +0,0 @@ -use crate::behaviour::Gossipsub; -use crate::rpc::*; -use libp2p::{ - core::either::{EitherError, EitherOutput}, - core::upgrade::{EitherUpgrade, InboundUpgrade, OutboundUpgrade, SelectUpgrade, UpgradeError}, - identify::Identify, - swarm::{ - protocols_handler::{ - KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, - }, - NegotiatedSubstream, NetworkBehaviour, ProtocolsHandler, - }, -}; -use std::task::{Context, Poll}; -use types::EthSpec; - -/* Auxiliary types for simplicity */ -type GossipHandler = ::ProtocolsHandler; -type RPCHandler = as NetworkBehaviour>::ProtocolsHandler; -type IdentifyHandler = ::ProtocolsHandler; - -/// Handler that combines Lighthouse's Behaviours' handlers in a delegating manner. -pub(super) struct DelegatingHandler { - /// Handler for the Gossipsub protocol. - gossip_handler: GossipHandler, - /// Handler for the RPC protocol. - rpc_handler: RPCHandler, - /// Handler for the Identify protocol. 
- identify_handler: IdentifyHandler, -} - -impl DelegatingHandler { - pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC, identify: &mut Identify) -> Self { - DelegatingHandler { - gossip_handler: gossipsub.new_handler(), - rpc_handler: rpc.new_handler(), - identify_handler: identify.new_handler(), - } - } - - /// Gives mutable access to the rpc handler. - pub fn rpc_mut(&mut self) -> &mut RPCHandler { - &mut self.rpc_handler - } - - /// Gives access to the rpc handler. - pub fn rpc(&self) -> &RPCHandler { - &self.rpc_handler - } - - /// Gives access to identify's handler. - pub fn _identify(&self) -> &IdentifyHandler { - &self.identify_handler - } -} - -/// Wrapper around the `ProtocolsHandler::InEvent` types of the handlers. -/// Simply delegated to the corresponding behaviour's handler. -#[derive(Debug, Clone)] -pub enum DelegateIn { - Gossipsub(::InEvent), - RPC( as ProtocolsHandler>::InEvent), - Identify(::InEvent), -} - -/// Wrapper around the `ProtocolsHandler::OutEvent` types of the handlers. -/// Simply delegated to the corresponding behaviour's handler. -pub enum DelegateOut { - Gossipsub(::OutEvent), - RPC( as ProtocolsHandler>::OutEvent), - Identify(Box<::OutEvent>), -} - -/// Wrapper around the `ProtocolsHandler::Error` types of the handlers. -/// Simply delegated to the corresponding behaviour's handler. -#[derive(Debug)] -pub enum DelegateError { - Gossipsub(::Error), - RPC( as ProtocolsHandler>::Error), - Identify(::Error), - Disconnected, -} - -impl std::error::Error for DelegateError {} - -impl std::fmt::Display for DelegateError { - fn fmt( - &self, - formater: &mut std::fmt::Formatter<'_>, - ) -> std::result::Result<(), std::fmt::Error> { - match self { - DelegateError::Gossipsub(err) => err.fmt(formater), - DelegateError::RPC(err) => err.fmt(formater), - DelegateError::Identify(err) => err.fmt(formater), - DelegateError::Disconnected => write!(formater, "Disconnected"), - } - } -} - -pub type DelegateInProto = SelectUpgrade< - ::InboundProtocol, - SelectUpgrade< - as ProtocolsHandler>::InboundProtocol, - ::InboundProtocol, - >, ->; - -pub type DelegateOutProto = EitherUpgrade< - ::OutboundProtocol, - EitherUpgrade< - as ProtocolsHandler>::OutboundProtocol, - ::OutboundProtocol, - >, ->; - -pub type DelegateOutInfo = EitherOutput< - ::OutboundOpenInfo, - EitherOutput< - as ProtocolsHandler>::OutboundOpenInfo, - ::OutboundOpenInfo, - >, ->; - -impl ProtocolsHandler for DelegatingHandler { - type InEvent = DelegateIn; - type OutEvent = DelegateOut; - type Error = DelegateError; - type InboundProtocol = DelegateInProto; - type OutboundProtocol = DelegateOutProto; - type OutboundOpenInfo = DelegateOutInfo; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - let gossip_proto = self.gossip_handler.listen_protocol(); - let rpc_proto = self.rpc_handler.listen_protocol(); - let identify_proto = self.identify_handler.listen_protocol(); - - let timeout = *gossip_proto - .timeout() - .max(rpc_proto.timeout()) - .max(identify_proto.timeout()); - - let select = SelectUpgrade::new( - gossip_proto.into_upgrade().1, - SelectUpgrade::new(rpc_proto.into_upgrade().1, identify_proto.into_upgrade().1), - ); - - SubstreamProtocol::new(select, ()).with_timeout(timeout) - } - - fn inject_fully_negotiated_inbound( - &mut self, - out: >::Output, - _info: Self::InboundOpenInfo, - ) { - match out { - // Gossipsub - EitherOutput::First(out) => { - self.gossip_handler.inject_fully_negotiated_inbound(out, ()) - } - // RPC - 
EitherOutput::Second(EitherOutput::First(out)) => { - self.rpc_handler.inject_fully_negotiated_inbound(out, ()) - } - // Identify - EitherOutput::Second(EitherOutput::Second(out)) => self - .identify_handler - .inject_fully_negotiated_inbound(out, ()), - } - } - - fn inject_fully_negotiated_outbound( - &mut self, - protocol: >::Output, - info: Self::OutboundOpenInfo, - ) { - match (protocol, info) { - // Gossipsub - (EitherOutput::First(protocol), EitherOutput::First(info)) => self - .gossip_handler - .inject_fully_negotiated_outbound(protocol, info), - // RPC - ( - EitherOutput::Second(EitherOutput::First(protocol)), - EitherOutput::Second(EitherOutput::First(info)), - ) => self - .rpc_handler - .inject_fully_negotiated_outbound(protocol, info), - // Identify - ( - EitherOutput::Second(EitherOutput::Second(protocol)), - EitherOutput::Second(EitherOutput::Second(())), - ) => self - .identify_handler - .inject_fully_negotiated_outbound(protocol, ()), - // Reaching here means we got a protocol and info for different behaviours - _ => unreachable!("output and protocol don't match"), - } - } - - fn inject_event(&mut self, event: Self::InEvent) { - match event { - DelegateIn::Gossipsub(ev) => self.gossip_handler.inject_event(ev), - DelegateIn::RPC(ev) => self.rpc_handler.inject_event(ev), - DelegateIn::Identify(()) => self.identify_handler.inject_event(()), - } - } - - fn inject_dial_upgrade_error( - &mut self, - info: Self::OutboundOpenInfo, - error: ProtocolsHandlerUpgrErr< - >::Error, - >, - ) { - match info { - // Gossipsub - EitherOutput::First(info) => match error { - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => { - self.gossip_handler.inject_dial_upgrade_error( - info, - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - ) - } - ProtocolsHandlerUpgrErr::Timer => self - .gossip_handler - .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer), - ProtocolsHandlerUpgrErr::Timeout => self - .gossip_handler - .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))) => { - self.gossip_handler.inject_dial_upgrade_error( - info, - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), - ) - } - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => { - unreachable!("info and error don't match") - } - }, - // RPC - EitherOutput::Second(EitherOutput::First(info)) => match error { - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => { - self.rpc_handler.inject_dial_upgrade_error( - info, - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - ) - } - ProtocolsHandlerUpgrErr::Timer => self - .rpc_handler - .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer), - ProtocolsHandlerUpgrErr::Timeout => self - .rpc_handler - .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B( - EitherError::A(err), - ))) => self.rpc_handler.inject_dial_upgrade_error( - info, - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => { - unreachable!("info and error don't match") - } - }, - // Identify - EitherOutput::Second(EitherOutput::Second(())) => match error { - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => { - self.identify_handler.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - ) - } - ProtocolsHandlerUpgrErr::Timer => self - 
-                    .identify_handler
-                    .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer),
-                ProtocolsHandlerUpgrErr::Timeout => self
-                    .identify_handler
-                    .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout),
-                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(
-                    EitherError::B(err),
-                ))) => self.identify_handler.inject_dial_upgrade_error(
-                    (),
-                    ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
-                ),
-                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
-                    unreachable!("info and error don't match")
-                }
-            },
-        }
-    }
-
-    fn connection_keep_alive(&self) -> KeepAlive {
-        self.gossip_handler
-            .connection_keep_alive()
-            .max(self.rpc_handler.connection_keep_alive())
-            .max(self.identify_handler.connection_keep_alive())
-    }
-
-    #[allow(clippy::type_complexity)]
-    fn poll(
-        &mut self,
-        cx: &mut Context,
-    ) -> Poll<
-        ProtocolsHandlerEvent<
-            Self::OutboundProtocol,
-            Self::OutboundOpenInfo,
-            Self::OutEvent,
-            Self::Error,
-        >,
-    > {
-        match self.gossip_handler.poll(cx) {
-            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Gossipsub(event)));
-            }
-            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Gossipsub(
-                    event,
-                )));
-            }
-            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
-                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-                    protocol: protocol
-                        .map_upgrade(EitherUpgrade::A)
-                        .map_info(EitherOutput::First),
-                });
-            }
-            Poll::Pending => (),
-        };
-
-        match self.rpc_handler.poll(cx) {
-            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::RPC(event)));
-            }
-            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::RPC(event)));
-            }
-            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
-                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-                    protocol: protocol
-                        .map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::A(u)))
-                        .map_info(|info| EitherOutput::Second(EitherOutput::First(info))),
-                });
-            }
-            Poll::Pending => (),
-        };
-
-        match self.identify_handler.poll(cx) {
-            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify(
-                    Box::new(event),
-                )));
-            }
-            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event)));
-            }
-            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
-                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-                    protocol: protocol
-                        .map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::B(u)))
-                        .map_info(|_| EitherOutput::Second(EitherOutput::Second(()))),
-                });
-            }
-            Poll::Pending => (),
-        };
-
-        Poll::Pending
-    }
-}
diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs
deleted file mode 100644
index d587ea6549a..00000000000
--- a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs
+++ /dev/null
@@ -1,132 +0,0 @@
-use crate::behaviour::Gossipsub;
-use crate::rpc::*;
-use delegate::DelegatingHandler;
-pub(super) use delegate::{
-    DelegateError, DelegateIn, DelegateInProto, DelegateOut, DelegateOutInfo, DelegateOutProto,
-};
-use libp2p::{
-    core::upgrade::{InboundUpgrade, OutboundUpgrade},
-    identify::Identify,
-    swarm::protocols_handler::{
-        KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
-    },
-    swarm::{NegotiatedSubstream, ProtocolsHandler},
-};
-use std::task::{Context, Poll};
-use types::EthSpec;
-
-mod delegate;
-
-/// Handler that combines Lighthouse's Behaviours' handlers in a delegating manner.
-pub struct BehaviourHandler<TSpec: EthSpec> {
-    /// Handler combining all sub behaviour's handlers.
-    delegate: DelegatingHandler<TSpec>,
-    /// Flag indicating if the handler is shutting down.
-    shutting_down: bool,
-}
-
-impl<TSpec: EthSpec> BehaviourHandler<TSpec> {
-    pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC<TSpec>, identify: &mut Identify) -> Self {
-        BehaviourHandler {
-            delegate: DelegatingHandler::new(gossipsub, rpc, identify),
-            shutting_down: false,
-        }
-    }
-}
-
-#[derive(Clone)]
-pub enum BehaviourHandlerIn<TSpec: EthSpec> {
-    Delegate(DelegateIn<TSpec>),
-    /// Start the shutdown process.
-    Shutdown(Option<(RequestId, OutboundRequest<TSpec>)>),
-}
-
-impl<TSpec: EthSpec> ProtocolsHandler for BehaviourHandler<TSpec> {
-    type InEvent = BehaviourHandlerIn<TSpec>;
-    type OutEvent = DelegateOut<TSpec>;
-    type Error = DelegateError<TSpec>;
-    type InboundProtocol = DelegateInProto<TSpec>;
-    type OutboundProtocol = DelegateOutProto<TSpec>;
-    type OutboundOpenInfo = DelegateOutInfo<TSpec>;
-    type InboundOpenInfo = ();
-
-    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
-        self.delegate.listen_protocol()
-    }
-
-    fn inject_fully_negotiated_inbound(
-        &mut self,
-        out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
-        _info: Self::InboundOpenInfo,
-    ) {
-        self.delegate.inject_fully_negotiated_inbound(out, ())
-    }
-
-    fn inject_fully_negotiated_outbound(
-        &mut self,
-        out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
-        info: Self::OutboundOpenInfo,
-    ) {
-        self.delegate.inject_fully_negotiated_outbound(out, info)
-    }
-
-    fn inject_event(&mut self, event: Self::InEvent) {
-        match event {
-            BehaviourHandlerIn::Delegate(delegated_ev) => self.delegate.inject_event(delegated_ev),
-            /* Events coming from the behaviour */
-            BehaviourHandlerIn::Shutdown(last_message) => {
-                self.shutting_down = true;
-                self.delegate.rpc_mut().shutdown(last_message);
-            }
-        }
-    }
-
-    fn inject_dial_upgrade_error(
-        &mut self,
-        info: Self::OutboundOpenInfo,
-        err: ProtocolsHandlerUpgrErr<
-            <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
-        >,
-    ) {
-        self.delegate.inject_dial_upgrade_error(info, err)
-    }
-
-    // We don't use the keep alive to disconnect. This is handled in the poll
-    fn connection_keep_alive(&self) -> KeepAlive {
-        KeepAlive::Yes
-    }
-
-    #[allow(clippy::type_complexity)]
-    fn poll(
-        &mut self,
-        cx: &mut Context,
-    ) -> Poll<
-        ProtocolsHandlerEvent<
-            Self::OutboundProtocol,
-            Self::OutboundOpenInfo,
-            Self::OutEvent,
-            Self::Error,
-        >,
-    > {
-        // Disconnect if the sub-handlers are ready.
-        // Currently we only respect the RPC handler.
-        if self.shutting_down && KeepAlive::No == self.delegate.rpc().connection_keep_alive() {
-            return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Disconnected));
-        }
-
-        match self.delegate.poll(cx) {
-            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Custom(event))
-            }
-            Poll::Ready(ProtocolsHandlerEvent::Close(err)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Close(err))
-            }
-            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
-                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol });
-            }
-            Poll::Pending => (),
-        }
-
-        Poll::Pending
-    }
-}
diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs
index fd37030e1be..c509da6a312 100644
--- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs
+++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs
@@ -1,7 +1,9 @@
-use crate::behaviour::gossipsub_scoring_parameters::PeerScoreSettings;
+use crate::behaviour::gossipsub_scoring_parameters::{
+    lighthouse_gossip_thresholds, PeerScoreSettings,
+};
+use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS};
 use crate::peer_manager::{
-    score::{PeerAction, ReportSource},
-    ConnectionDirection, PeerManager, PeerManagerEvent,
+    score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent,
 };
 use crate::rpc::*;
 use crate::service::METADATA_FILENAME;
@@ -12,24 +14,21 @@ use crate::types::{
 use crate::Eth2Enr;
 use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash};
 use futures::prelude::*;
-use handler::{BehaviourHandler, BehaviourHandlerIn, DelegateIn, DelegateOut};
 use libp2p::{
     core::{
-        connection::{ConnectedPoint, ConnectionId, ListenerId},
-        identity::Keypair,
-        Multiaddr,
+        connection::ConnectionId, identity::Keypair, multiaddr::Protocol as MProtocol, Multiaddr,
     },
     gossipsub::{
         subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter},
         Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance,
-        MessageAuthenticity, MessageId, PeerScoreThresholds,
+        MessageAuthenticity, MessageId,
     },
-    identify::{Identify, IdentifyEvent},
+    identify::{Identify, IdentifyConfig, IdentifyEvent},
     swarm::{
-        AddressScore, NetworkBehaviour, NetworkBehaviourAction as NBAction, NotifyHandler,
-        PollParameters, ProtocolsHandler,
+        AddressScore, DialPeerCondition, NetworkBehaviourAction as NBAction,
+        NetworkBehaviourEventProcess, PollParameters,
     },
-    PeerId,
+    NetworkBehaviour, PeerId,
 };
 use slog::{crit, debug, o, trace, warn};
 use ssz::Encode;
@@ -45,11 +44,9 @@ use std::{
 };
 use types::{ChainSpec, EnrForkId, EthSpec, SignedBeaconBlock, Slot, SubnetId};
 
-mod gossipsub_scoring_parameters;
-mod handler;
+pub mod gossipsub_scoring_parameters;
 
 const MAX_IDENTIFY_ADDRESSES: usize = 10;
-pub const GOSSIPSUB_GREYLIST_THRESHOLD: f64 = -16000.0;
 
 /// Identifier of requests sent by a peer.
 pub type PeerRequestId = (ConnectionId, SubstreamId);
@@ -61,11 +58,15 @@ pub type Gossipsub = BaseGossipsub<SnappyTransform, SubscriptionFilter>;
 #[derive(Debug)]
 pub enum BehaviourEvent<TSpec: EthSpec> {
     /// We have successfully dialed and connected to a peer.
-    PeerDialed(PeerId),
+    PeerConnectedOutgoing(PeerId),
     /// A peer has successfully dialed and connected to us.
-    PeerConnected(PeerId),
+    PeerConnectedIncoming(PeerId),
     /// A peer has disconnected.
     PeerDisconnected(PeerId),
+    /// The peer needs to be banned.
+    PeerBanned(PeerId),
+    /// The peer has been unbanned.
+ PeerUnbanned(PeerId), /// An RPC Request that was sent failed. RPCFailed { /// The id of the failed request. @@ -103,68 +104,98 @@ pub enum BehaviourEvent { StatusPeer(PeerId), } +/// Internal type to pass messages from sub-behaviours to the poll of the global behaviour to be +/// specified as an NBAction. +enum InternalBehaviourMessage { + /// Dial a Peer. + DialPeer(PeerId), + /// The socket has been updated. + SocketUpdated(Multiaddr), +} + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. +#[derive(NetworkBehaviour)] +#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] pub struct Behaviour { + /* Sub-Behaviours */ /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, /// The Eth2 RPC specified in the wire-0 protocol. eth2_rpc: RPC, + /// Discv5 Discovery protocol. + discovery: Discovery, /// Keep regular connection to peers and disconnect if absent. // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. identify: Identify, + + /* Auxiliary Fields */ /// The peer manager that keeps track of peer's reputation and status. + #[behaviour(ignore)] peer_manager: PeerManager, /// The output events generated by this behaviour to be consumed in the swarm poll. + #[behaviour(ignore)] events: VecDeque>, - /// Queue of peers to disconnect and an optional reason for the disconnection. - peers_to_dc: VecDeque<(PeerId, Option)>, + /// Internal behaviour events, the NBAction type is composed of sub-behaviours, so we use a + /// custom type here to avoid having to specify the concrete type. + #[behaviour(ignore)] + internal_events: VecDeque, /// A collections of variables accessible outside the network service. + #[behaviour(ignore)] network_globals: Arc>, /// Keeps track of the current EnrForkId for upgrading gossipsub topics. // NOTE: This can be accessed via the network_globals ENR. However we keep it here for quick // lookups for every gossipsub message send. + #[behaviour(ignore)] enr_fork_id: EnrForkId, - /// The waker for the current thread. + /// The waker for the current task. This is used to wake the task when events are added to the + /// queue. + #[behaviour(ignore)] waker: Option, - /// Directory where metadata is stored + /// Directory where metadata is stored. + #[behaviour(ignore)] network_dir: PathBuf, - /// Logger for behaviour actions. - log: slog::Logger, - + /// Gossipsub score parameters. + #[behaviour(ignore)] score_settings: PeerScoreSettings, - /// The interval for updating gossipsub scores + #[behaviour(ignore)] update_gossipsub_scores: tokio::time::Interval, + /// Logger for behaviour actions. + #[behaviour(ignore)] + log: slog::Logger, } /// Implements the combined behaviour for the libp2p service. 
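
The `#[derive(NetworkBehaviour)]` composition above replaces the hand-rolled `BehaviourHandler`/`DelegatingHandler` machinery deleted earlier in this diff. A minimal sketch of the same pattern, assuming the libp2p 0.39-era derive that this change targets (`Composed`, `ComposedEvent` and the `ping` sub-behaviour are illustrative, not part of this change; `void` is the usual helper crate for the uninhabited handler in-event type):

    use std::collections::VecDeque;
    use std::task::{Context, Poll};

    use libp2p::ping::{Ping, PingEvent};
    use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters};
    use libp2p::NetworkBehaviour;
    use void::Void; // Ping's handler accepts no in-events

    #[derive(NetworkBehaviour)]
    #[behaviour(out_event = "ComposedEvent", poll_method = "poll")]
    struct Composed {
        // Plain fields must themselves implement NetworkBehaviour...
        ping: Ping,
        // ...anything else has to be opted out of the derive.
        #[behaviour(ignore)]
        events: VecDeque<ComposedEvent>,
    }

    #[derive(Debug)]
    enum ComposedEvent {
        Ping(PingEvent),
    }

    // One impl per sub-behaviour: this is how sub-behaviour events enter the
    // composed behaviour (the diff does the same for gossipsub, RPC,
    // discovery and identify).
    impl NetworkBehaviourEventProcess<PingEvent> for Composed {
        fn inject_event(&mut self, event: PingEvent) {
            self.events.push_back(ComposedEvent::Ping(event));
        }
    }

    impl Composed {
        // Called by the derived NetworkBehaviour::poll; drains the local queue.
        fn poll(
            &mut self,
            _cx: &mut Context,
            _params: &mut impl PollParameters,
        ) -> Poll<NetworkBehaviourAction<Void, ComposedEvent>> {
            if let Some(event) = self.events.pop_front() {
                return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event));
            }
            Poll::Pending
        }
    }
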
impl Behaviour { pub async fn new( local_key: &Keypair, - net_conf: &NetworkConfig, + config: &NetworkConfig, network_globals: Arc>, log: &slog::Logger, chain_spec: &ChainSpec, ) -> error::Result { let behaviour_log = log.new(o!()); - let identify = if net_conf.private { - Identify::new( - "".into(), + // Set up the Identify Behaviour + let identify_config = if config.private { + IdentifyConfig::new( "".into(), local_key.public(), // Still send legitimate public key ) } else { - Identify::new( - "lighthouse/libp2p".into(), - lighthouse_version::version_with_platform(), - local_key.public(), - ) + IdentifyConfig::new("eth2/1.0.0".into(), local_key.public()) + .with_agent_version(lighthouse_version::version_with_platform()) }; + // Build and start the discovery sub-behaviour + let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log).await?; + // start searching for peers + discovery.discover_peers(); + + // Grab our local ENR FORK ID let enr_fork_id = network_globals .local_enr() .eth2() @@ -177,32 +208,26 @@ impl Behaviour { max_subscriptions_per_request: 100, //this is according to the current go implementation }; - // Initialize the compression transform. - let snappy_transform = SnappyTransform::new(net_conf.gs_config.max_transmit_size()); - + // Build and configure the Gossipsub behaviour + let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( MessageAuthenticity::Anonymous, - net_conf.gs_config.clone(), + config.gs_config.clone(), filter, snappy_transform, ) .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; - //we don't know the number of active validators and the current slot yet + // Construct a set of gossipsub peer scoring parameters + // We don't know the number of active validators and the current slot yet let active_validators = TSpec::minimum_validator_count(); let current_slot = Slot::new(0); - let thresholds = PeerScoreThresholds { - gossip_threshold: -4000.0, - publish_threshold: -8000.0, - graylist_threshold: GOSSIPSUB_GREYLIST_THRESHOLD, - accept_px_threshold: 100.0, - opportunistic_graft_threshold: 5.0, - }; + let thresholds = lighthouse_gossip_thresholds(); - let score_settings = PeerScoreSettings::new(chain_spec, &net_conf.gs_config); + let score_settings = PeerScoreSettings::new(chain_spec, &config.gs_config); - //Prepare scoring parameters + // Prepare scoring parameters let params = score_settings.get_peer_score_params( active_validators, &thresholds, @@ -212,6 +237,7 @@ impl Behaviour { trace!(behaviour_log, "Using peer score params"; "params" => ?params); + // Set up a scoring update interval let update_gossipsub_scores = tokio::time::interval(params.decay_interval); gossipsub @@ -219,71 +245,35 @@ impl Behaviour { .expect("Valid score params and thresholds"); Ok(Behaviour { - eth2_rpc: RPC::new(log.clone()), + // Sub-behaviours gossipsub, - identify, - peer_manager: PeerManager::new(local_key, net_conf, network_globals.clone(), log) - .await?, + eth2_rpc: RPC::new(log.clone()), + discovery, + identify: Identify::new(identify_config), + // Auxiliary fields + peer_manager: PeerManager::new(config, network_globals.clone(), log).await?, events: VecDeque::new(), - peers_to_dc: VecDeque::new(), + internal_events: VecDeque::new(), network_globals, enr_fork_id, waker: None, - network_dir: net_conf.network_dir.clone(), + network_dir: config.network_dir.clone(), log: behaviour_log, score_settings, update_gossipsub_scores, }) } - 
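
The `IdentifyConfig` builder used in `Behaviour::new` above replaces the multi-argument `Identify::new` constructor of the previous libp2p release. A compact sketch of the builder usage, with a placeholder agent-version string:

    use libp2p::identify::{Identify, IdentifyConfig};
    use libp2p::identity::Keypair;

    fn build_identify(local_key: &Keypair) -> Identify {
        // Protocol name and public key are required; the agent version is
        // optional and attached via the builder (hypothetical version string).
        let config = IdentifyConfig::new("eth2/1.0.0".into(), local_key.public())
            .with_agent_version("lighthouse/vX.Y.Z".into());
        Identify::new(config)
    }

Note how the diff keeps a legitimate public key even when `config.private` blanks out the identifying strings; the Identify protocol itself needs the key to function.
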
pub fn update_gossipsub_parameters( - &mut self, - active_validators: usize, - current_slot: Slot, - ) -> error::Result<()> { - let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) = - self.score_settings - .get_dynamic_topic_params(active_validators, current_slot)?; - - let fork_digest = self.enr_fork_id.fork_digest; - let get_topic = |kind: GossipKind| -> Topic { - GossipTopic::new(kind, GossipEncoding::default(), fork_digest).into() - }; - - debug!(self.log, "Updating gossipsub score parameters"; - "active_validators" => active_validators); - trace!(self.log, "Updated gossipsub score parameters"; - "beacon_block_params" => ?beacon_block_params, - "beacon_aggregate_proof_params" => ?beacon_aggregate_proof_params, - "beacon_attestation_subnet_params" => ?beacon_attestation_subnet_params, - ); - - self.gossipsub - .set_topic_params(get_topic(GossipKind::BeaconBlock), beacon_block_params)?; - - self.gossipsub.set_topic_params( - get_topic(GossipKind::BeaconAggregateAndProof), - beacon_aggregate_proof_params, - )?; + /* Public Accessible Functions to interact with the behaviour */ - for i in 0..self.score_settings.attestation_subnet_count() { - self.gossipsub.set_topic_params( - get_topic(GossipKind::Attestation(SubnetId::new(i))), - beacon_attestation_subnet_params.clone(), - )?; - } - - Ok(()) + /// Get a mutable reference to the underlying discovery sub-behaviour. + pub fn discovery_mut(&mut self) -> &mut Discovery { + &mut self.discovery } - /// Attempts to connect to a libp2p peer. - /// - /// This MUST be used over Swarm::dial() as this keeps track of the peer in the peer manager. - /// - /// All external dials, dial a multiaddr. This is currently unused but kept here in case any - /// part of lighthouse needs to connect to a peer_id in the future. - pub fn dial(&mut self, peer_id: &PeerId) { - self.peer_manager.dial_peer(peer_id); + /// Get a mutable reference to the peer manager. + pub fn peer_manager_mut(&mut self) -> &mut PeerManager { + &mut self.peer_manager } /// Returns the local ENR of the node. @@ -455,6 +445,48 @@ impl Behaviour { } } + /// Updates the current gossipsub scoring parameters based on the validator count and current + /// slot. 
+ pub fn update_gossipsub_parameters( + &mut self, + active_validators: usize, + current_slot: Slot, + ) -> error::Result<()> { + let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) = + self.score_settings + .get_dynamic_topic_params(active_validators, current_slot)?; + + let fork_digest = self.enr_fork_id.fork_digest; + let get_topic = |kind: GossipKind| -> Topic { + GossipTopic::new(kind, GossipEncoding::default(), fork_digest).into() + }; + + debug!(self.log, "Updating gossipsub score parameters"; + "active_validators" => active_validators); + trace!(self.log, "Updated gossipsub score parameters"; + "beacon_block_params" => ?beacon_block_params, + "beacon_aggregate_proof_params" => ?beacon_aggregate_proof_params, + "beacon_attestation_subnet_params" => ?beacon_attestation_subnet_params, + ); + + self.gossipsub + .set_topic_params(get_topic(GossipKind::BeaconBlock), beacon_block_params)?; + + self.gossipsub.set_topic_params( + get_topic(GossipKind::BeaconAggregateAndProof), + beacon_aggregate_proof_params, + )?; + + for i in 0..self.score_settings.attestation_subnet_count() { + self.gossipsub.set_topic_params( + get_topic(GossipKind::Attestation(SubnetId::new(i))), + beacon_attestation_subnet_params.clone(), + )?; + } + + Ok(()) + } + /* Eth2 RPC behaviour functions */ /// Send a request to a peer over RPC. @@ -487,11 +519,6 @@ impl Behaviour { /* Peer management functions */ - /// Report a peer's action. - pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) { - self.peer_manager.report_peer(peer_id, action, source) - } - /// Disconnects from a peer providing a reason. /// /// This will send a goodbye, disconnect and then ban the peer. @@ -502,23 +529,19 @@ impl Behaviour { /// Returns an iterator over all enr entries in the DHT. pub fn enr_entries(&mut self) -> Vec { - self.peer_manager.discovery_mut().table_entries_enr() + self.discovery.table_entries_enr() } /// Add an ENR to the routing table of the discovery mechanism. pub fn add_enr(&mut self, enr: Enr) { - self.peer_manager.discovery_mut().add_enr(enr); + self.discovery.add_enr(enr); } /// Updates a subnet value to the ENR bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. pub fn update_enr_subnet(&mut self, subnet_id: SubnetId, value: bool) { - if let Err(e) = self - .peer_manager - .discovery_mut() - .update_enr_bitfield(subnet_id, value) - { + if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) { crit!(self.log, "Could not update ENR bitfield"; "error" => e); } // update the local meta data which informs our peers of the update during PINGS @@ -527,16 +550,58 @@ impl Behaviour { /// Attempts to discover new peers for a given subnet. The `min_ttl` gives the time at which we /// would like to retain the peers for. 
- pub fn discover_subnet_peers(&mut self, subnet_subscriptions: Vec) { - self.peer_manager - .discover_subnet_peers(subnet_subscriptions) + pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { + // If discovery is not started or disabled, ignore the request + if !self.discovery.started { + return; + } + + let filtered: Vec = subnets_to_discover + .into_iter() + .filter(|s| { + // Extend min_ttl of connected peers on required subnets + if let Some(min_ttl) = s.min_ttl { + self.network_globals + .peers + .write() + .extend_peers_on_subnet(s.subnet_id, min_ttl); + } + // Already have target number of peers, no need for subnet discovery + let peers_on_subnet = self + .network_globals + .peers + .read() + .good_peers_on_subnet(s.subnet_id) + .count(); + if peers_on_subnet >= TARGET_SUBNET_PEERS { + trace!( + self.log, + "Discovery query ignored"; + "subnet_id" => ?s.subnet_id, + "reason" => "Already connected to desired peers", + "connected_peers_on_subnet" => peers_on_subnet, + "target_subnet_peers" => TARGET_SUBNET_PEERS, + ); + false + // Queue an outgoing connection request to the cached peers that are on `s.subnet_id`. + // If we connect to the cached peers before the discovery query starts, then we potentially + // save a costly discovery query. + } else { + self.dial_cached_enrs_in_subnet(s.subnet_id); + true + } + }) + .collect(); + + // request the subnet query from discovery + if !filtered.is_empty() { + self.discovery.discover_subnet_peers(filtered); + } } /// Updates the local ENR's "eth2" field with the latest EnrForkId. pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { - self.peer_manager - .discovery_mut() - .update_eth2_enr(enr_fork_id.clone()); + self.discovery.update_eth2_enr(enr_fork_id.clone()); // unsubscribe from all gossip topics and re-subscribe to their new fork counterparts let subscribed_topics = self @@ -567,8 +632,7 @@ impl Behaviour { /// Updates the current meta data of the node to match the local ENR. fn update_metadata(&mut self) { let local_attnets = self - .peer_manager - .discovery() + .discovery .local_enr() .bitfield::() .expect("Local discovery must have bitfield"); @@ -629,7 +693,97 @@ impl Behaviour { &mut self.peer_manager } - fn on_gossip_event(&mut self, event: GossipsubEvent) { + // RPC Propagation methods + /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. + fn propagate_response(&mut self, id: RequestId, peer_id: PeerId, response: Response) { + if !matches!(id, RequestId::Behaviour) { + self.add_event(BehaviourEvent::ResponseReceived { + peer_id, + id, + response, + }); + } + } + + /// Convenience function to propagate a request. + fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { + self.add_event(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }); + } + + /// Adds an event to the queue waking the current task to process it. + fn add_event(&mut self, event: BehaviourEvent) { + self.events.push_back(event); + if let Some(waker) = &self.waker { + waker.wake_by_ref(); + } + } + + /// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't + /// in Connected, Dialing or Banned state. 
+ fn dial_cached_enrs_in_subnet(&mut self, subnet_id: SubnetId) { + let predicate = subnet_predicate::(vec![subnet_id], &self.log); + let peers_to_dial: Vec = self + .discovery + .cached_enrs() + .filter_map(|(peer_id, enr)| { + let peers = self.network_globals.peers.read(); + if predicate(enr) && peers.should_dial(peer_id) { + Some(*peer_id) + } else { + None + } + }) + .collect(); + for peer_id in peers_to_dial { + debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); + // Remove the ENR from the cache to prevent continual re-dialing on disconnects + self.discovery.remove_cached_enr(&peer_id); + self.internal_events + .push_back(InternalBehaviourMessage::DialPeer(peer_id)); + } + } + + /// Creates a whitelist topic filter that covers all possible topics using the given set of + /// possible fork digests. + fn create_whitelist_filter( + possible_fork_digests: Vec<[u8; 4]>, + attestation_subnet_count: u64, + ) -> WhitelistSubscriptionFilter { + let mut possible_hashes = HashSet::new(); + for fork_digest in possible_fork_digests { + let mut add = |kind| { + let topic: Topic = + GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); + possible_hashes.insert(topic.hash()); + }; + + use GossipKind::*; + add(BeaconBlock); + add(BeaconAggregateAndProof); + add(VoluntaryExit); + add(ProposerSlashing); + add(AttesterSlashing); + for id in 0..attestation_subnet_count { + add(Attestation(SubnetId::new(id))); + } + } + WhitelistSubscriptionFilter(possible_hashes) + } +} + +/* Behaviour Event Process Implementations + * + * These implementations dictate how to process each event that is emitted from each + * sub-behaviour. + */ + +// Gossipsub +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: GossipsubEvent) { match event { GossipsubEvent::Message { propagation_source, @@ -673,43 +827,25 @@ impl Behaviour { } } } +} - /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. - fn propagate_response(&mut self, id: RequestId, peer_id: PeerId, response: Response) { - if !matches!(id, RequestId::Behaviour) { - self.add_event(BehaviourEvent::ResponseReceived { - peer_id, - id, - response, - }); - } - } - - /// Convenience function to propagate a request. - fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { - self.add_event(BehaviourEvent::RequestReceived { - peer_id, - id, - request, - }); - } - - fn on_rpc_event(&mut self, message: RPCMessage) { - let peer_id = message.peer_id; +// RPC +impl NetworkBehaviourEventProcess> for Behaviour { + fn inject_event(&mut self, event: RPCMessage) { + let peer_id = event.peer_id; if !self.peer_manager.is_connected(&peer_id) { - //ignore this event debug!( self.log, - "Ignoring rpc message of disconnected peer"; + "Ignoring rpc message of disconnecting peer"; "peer" => %peer_id ); return; } - let handler_id = message.conn_id; + let handler_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated - match message.event { + match event.event { Err(handler_err) => { match handler_err { HandlerErr::Inbound { @@ -767,12 +903,10 @@ impl Behaviour { "reason" => %reason, "client" => %self.network_globals.client(&peer_id), ); - self.peers_to_dc.push_back((peer_id, None)); // NOTE: We currently do not inform the application that we are - // disconnecting here. - // The actual disconnection event will be relayed to the application. 
Ideally - // this time difference is short, but we may need to introduce a message to - // inform the application layer early. + // disconnecting here. The RPC handler will automatically + // disconnect for us. + // The actual disconnection event will be relayed to the application. } /* Protocols propagated to the Network */ InboundRequest::Status(msg) => { @@ -822,38 +956,127 @@ impl Behaviour { } } } +} + +// Discovery +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: DiscoveryEvent) { + match event { + DiscoveryEvent::SocketUpdated(socket_addr) => { + // A new UDP socket has been detected. + // Build a multiaddr to report to libp2p + let mut multiaddr = Multiaddr::from(socket_addr.ip()); + // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling + // should handle this. + multiaddr.push(MProtocol::Tcp(self.network_globals.listen_port_tcp())); + self.internal_events + .push_back(InternalBehaviourMessage::SocketUpdated(multiaddr)); + } + DiscoveryEvent::QueryResult(results) => { + let to_dial_peers = self.peer_manager.peers_discovered(results); + for peer_id in to_dial_peers { + debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); + self.internal_events + .push_back(InternalBehaviourMessage::DialPeer(peer_id)); + } + } + } + } +} + +// Identify +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: IdentifyEvent) { + match event { + IdentifyEvent::Received { peer_id, mut info } => { + if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { + debug!( + self.log, + "More than 10 addresses have been identified, truncating" + ); + info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); + } + // send peer info to the peer manager. + self.peer_manager.identify(&peer_id, &info); + + debug!(self.log, "Identified Peer"; "peer" => %peer_id, + "protocol_version" => info.protocol_version, + "agent_version" => info.agent_version, + "listening_ addresses" => ?info.listen_addrs, + "observed_address" => ?info.observed_addr, + "protocols" => ?info.protocols + ); + } + IdentifyEvent::Sent { .. } => {} + IdentifyEvent::Error { .. } => {} + IdentifyEvent::Pushed { .. } => {} + } + } +} - /// Consumes the events list when polled. - fn custom_poll( +impl Behaviour { + /// Consumes the events list and drives the Lighthouse global NetworkBehaviour. 
+ fn poll( &mut self, cx: &mut Context, - ) -> Poll, BehaviourEvent>> { - // handle pending disconnections to perform - if let Some((peer_id, reason)) = self.peers_to_dc.pop_front() { - return Poll::Ready(NBAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: BehaviourHandlerIn::Shutdown( - reason.map(|reason| (RequestId::Behaviour, OutboundRequest::Goodbye(reason))), - ), - }); + _: &mut impl PollParameters, + ) -> Poll>> { + if let Some(waker) = &self.waker { + if waker.will_wake(cx.waker()) { + self.waker = Some(cx.waker().clone()); + } + } else { + self.waker = Some(cx.waker().clone()); + } + + // Handle internal events first + if let Some(event) = self.internal_events.pop_front() { + match event { + InternalBehaviourMessage::DialPeer(peer_id) => { + return Poll::Ready(NBAction::DialPeer { + peer_id, + condition: DialPeerCondition::Disconnected, + }); + } + InternalBehaviourMessage::SocketUpdated(address) => { + return Poll::Ready(NBAction::ReportObservedAddr { + address, + score: AddressScore::Finite(1), + }); + } + } } // check the peer manager for events loop { match self.peer_manager.poll_next_unpin(cx) { Poll::Ready(Some(event)) => match event { - PeerManagerEvent::Dial(peer_id) => { - return Poll::Ready(NBAction::DialPeer { + PeerManagerEvent::PeerConnectedIncoming(peer_id) => { + return Poll::Ready(NBAction::GenerateEvent( + BehaviourEvent::PeerConnectedIncoming(peer_id), + )); + } + PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { + return Poll::Ready(NBAction::GenerateEvent( + BehaviourEvent::PeerConnectedOutgoing(peer_id), + )); + } + PeerManagerEvent::PeerDisconnected(peer_id) => { + return Poll::Ready(NBAction::GenerateEvent( + BehaviourEvent::PeerDisconnected(peer_id), + )); + } + PeerManagerEvent::Banned(peer_id, associated_ips) => { + self.discovery.ban_peer(&peer_id, associated_ips); + return Poll::Ready(NBAction::GenerateEvent(BehaviourEvent::PeerBanned( peer_id, - condition: libp2p::swarm::DialPeerCondition::Disconnected, - }); + ))); } - PeerManagerEvent::SocketUpdated(address) => { - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); + PeerManagerEvent::UnBanned(peer_id, associated_ips) => { + self.discovery.unban_peer(&peer_id, associated_ips); + return Poll::Ready(NBAction::GenerateEvent(BehaviourEvent::PeerUnbanned( + peer_id, + ))); } PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform @@ -862,6 +1085,10 @@ impl Behaviour { peer_id, ))); } + PeerManagerEvent::DiscoverPeers => { + // Peer manager has requested a discovery query for more peers. 
+ self.discovery.discover_peers(); + } PeerManagerEvent::Ping(peer_id) => { // send a ping request to this peer self.ping(RequestId::Behaviour, peer_id); @@ -870,17 +1097,10 @@ impl Behaviour { self.send_meta_data_request(peer_id); } PeerManagerEvent::DisconnectPeer(peer_id, reason) => { - debug!(self.log, "PeerManager disconnecting peer"; + debug!(self.log, "Peer Manager disconnecting peer"; "peer_id" => %peer_id, "reason" => %reason); // send one goodbye - return Poll::Ready(NBAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: BehaviourHandlerIn::Shutdown(Some(( - RequestId::Behaviour, - OutboundRequest::Goodbye(reason), - ))), - }); + self.eth2_rpc.shutdown(peer_id, reason); } }, Poll::Pending => break, @@ -899,381 +1119,6 @@ impl Behaviour { Poll::Pending } - - fn on_identify_event(&mut self, event: IdentifyEvent) { - match event { - IdentifyEvent::Received { - peer_id, - mut info, - observed_addr, - } => { - if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { - debug!( - self.log, - "More than 10 addresses have been identified, truncating" - ); - info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); - } - // send peer info to the peer manager. - self.peer_manager.identify(&peer_id, &info); - - debug!(self.log, "Identified Peer"; "peer" => %peer_id, - "protocol_version" => info.protocol_version, - "agent_version" => info.agent_version, - "listening_ addresses" => ?info.listen_addrs, - "observed_address" => ?observed_addr, - "protocols" => ?info.protocols - ); - } - IdentifyEvent::Sent { .. } => {} - IdentifyEvent::Error { .. } => {} - } - } - - /// Adds an event to the queue waking the current thread to process it. - fn add_event(&mut self, event: BehaviourEvent) { - self.events.push_back(event); - if let Some(waker) = &self.waker { - waker.wake_by_ref(); - } - } - - /// Creates a whitelist topic filter that covers all possible topics using the given set of - /// possible fork digests. - fn create_whitelist_filter( - possible_fork_digests: Vec<[u8; 4]>, - attestation_subnet_count: u64, - ) -> WhitelistSubscriptionFilter { - let mut possible_hashes = HashSet::new(); - for fork_digest in possible_fork_digests { - let mut add = |kind| { - let topic: Topic = - GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); - possible_hashes.insert(topic.hash()); - }; - - use GossipKind::*; - add(BeaconBlock); - add(BeaconAggregateAndProof); - add(VoluntaryExit); - add(ProposerSlashing); - add(AttesterSlashing); - for id in 0..attestation_subnet_count { - add(Attestation(SubnetId::new(id))); - } - } - WhitelistSubscriptionFilter(possible_hashes) - } -} - -/// Calls the given function with the given args on all sub behaviours. -macro_rules! delegate_to_behaviours { - ($self: ident, $fn: ident, $($arg: ident), *) => { - $self.gossipsub.$fn($($arg),*); - $self.eth2_rpc.$fn($($arg),*); - $self.identify.$fn($($arg),*); - }; -} - -impl NetworkBehaviour for Behaviour { - type ProtocolsHandler = BehaviourHandler; - type OutEvent = BehaviourEvent; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - BehaviourHandler::new(&mut self.gossipsub, &mut self.eth2_rpc, &mut self.identify) - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - self.peer_manager.addresses_of_peer(peer_id) - } - - // This gets called every time a connection is established. - // NOTE: The current logic implies that we would reject extra connections for already connected - // peers if we have reached our peer limit. 
This is fine for the time being as we currently - // only allow a single connection per peer. - fn inject_connection_established( - &mut self, - peer_id: &PeerId, - conn_id: &ConnectionId, - endpoint: &ConnectedPoint, - ) { - let goodbye_reason: Option = if self.peer_manager.is_banned(peer_id) { - // If the peer is banned, send goodbye with reason banned. - // A peer that has recently transitioned to the banned state should be in the - // disconnecting state, but the `is_banned()` function is dependent on score so should - // be true here in this case. - Some(GoodbyeReason::Banned) - } else if self.peer_manager.peer_limit_reached() - && self - .network_globals - .peers - .read() - .peer_info(peer_id) - .map_or(true, |i| !i.has_future_duty()) - { - // If we are at our peer limit and we don't need the peer for a future validator - // duty, send goodbye with reason TooManyPeers - Some(GoodbyeReason::TooManyPeers) - } else { - None - }; - - if let Some(goodbye_reason) = goodbye_reason { - match goodbye_reason { - GoodbyeReason::Banned => { - debug!(self.log, "Disconnecting newly connected peer"; "peer_id" => %peer_id, "reason" => %goodbye_reason) - } - _ => { - trace!(self.log, "Disconnecting newly connected peer"; "peer_id" => %peer_id, "reason" => %goodbye_reason) - } - } - self.peers_to_dc.push_back((*peer_id, Some(goodbye_reason))); - // NOTE: We don't inform the peer manager that this peer is disconnecting. It is simply - // rejected with a goodbye. - return; - } - - // All peers at this point will be registered as being connected. - // Notify the peer manager of a successful connection - match endpoint { - ConnectedPoint::Listener { send_back_addr, .. } => { - self.peer_manager - .connect_ingoing(&peer_id, send_back_addr.clone()); - self.add_event(BehaviourEvent::PeerConnected(*peer_id)); - debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Incoming"); - } - ConnectedPoint::Dialer { address } => { - self.peer_manager - .connect_outgoing(&peer_id, address.clone()); - self.add_event(BehaviourEvent::PeerDialed(*peer_id)); - debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Dialed"); - } - } - // report the event to the behaviour - delegate_to_behaviours!( - self, - inject_connection_established, - peer_id, - conn_id, - endpoint - ); - } - - // This gets called on the initial connection establishment. - // NOTE: This gets called after inject_connection_established. Therefore the logic in that - // function dictates the logic here. - fn inject_connected(&mut self, peer_id: &PeerId) { - // If the PeerManager has connected this peer, inform the behaviours - if !self.network_globals.peers.read().is_connected(&peer_id) { - return; - } - - // increment prometheus metrics - metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); - metrics::set_gauge( - &metrics::PEERS_CONNECTED, - self.network_globals.connected_peers() as i64, - ); - - delegate_to_behaviours!(self, inject_connected, peer_id); - } - - // This gets called every time a connection is closed. - // NOTE: The peer manager state can be modified in the lifetime of the peer. Due to the scoring - // mechanism. Peers can become banned. In this case, we still want to inform the behaviours. - fn inject_connection_closed( - &mut self, - peer_id: &PeerId, - conn_id: &ConnectionId, - endpoint: &ConnectedPoint, - ) { - // If the peer manager (and therefore the behaviour's) believe this peer connected, inform - // about the disconnection. 
- // It could be the peer was in the process of being disconnected. In this case the - // sub-behaviours are expecting this peer to be connected and we inform them. - if self - .network_globals - .peers - .read() - .is_connected_or_disconnecting(peer_id) - { - // We are disconnecting the peer or the peer has already been connected. - // Both these cases, the peer has been previously registered in the sub protocols. - delegate_to_behaviours!(self, inject_connection_closed, peer_id, conn_id, endpoint); - } - } - - // This gets called once there are no more active connections. - fn inject_disconnected(&mut self, peer_id: &PeerId) { - // If the application/behaviour layers thinks this peer has connected inform it of the disconnect. - - // Remove all subnet subscriptions from peerdb for the disconnected peer. - self.peer_manager().remove_all_subscriptions(&peer_id); - - if self - .network_globals - .peers - .read() - .is_connected_or_disconnecting(peer_id) - { - // We are disconnecting the peer or the peer has already been connected. - // Both these cases, the peer has been previously registered in the sub protocols and - // potentially the application layer. - // Inform the application. - self.add_event(BehaviourEvent::PeerDisconnected(*peer_id)); - // Inform the behaviour. - delegate_to_behaviours!(self, inject_disconnected, peer_id); - - debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); - - // Decrement the PEERS_PER_CLIENT metric - if let Some(kind) = self - .network_globals - .peers - .read() - .peer_info(peer_id) - .map(|info| info.client.kind.clone()) - { - if let Some(v) = - metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) - { - v.dec() - }; - } - } - - // Inform the peer manager. - // NOTE: It may be the case that a rejected node, due to too many peers is disconnected - // here and the peer manager has no knowledge of its connection. We insert it here for - // reference so that peer manager can track this peer. - self.peer_manager.notify_disconnect(&peer_id); - - // Update the prometheus metrics - metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); - metrics::set_gauge( - &metrics::PEERS_CONNECTED, - self.network_globals.connected_peers() as i64, - ); - } - - fn inject_addr_reach_failure( - &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn std::error::Error, - ) { - delegate_to_behaviours!(self, inject_addr_reach_failure, peer_id, addr, error); - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - // Could not dial the peer, inform the peer manager. 
- self.peer_manager.notify_dial_failure(&peer_id); - delegate_to_behaviours!(self, inject_dial_failure, peer_id); - } - - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - delegate_to_behaviours!(self, inject_new_listen_addr, addr); - } - - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - delegate_to_behaviours!(self, inject_expired_listen_addr, addr); - } - - fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - delegate_to_behaviours!(self, inject_new_external_addr, addr); - } - - fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { - delegate_to_behaviours!(self, inject_listener_error, id, err); - } - fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &std::io::Error>) { - delegate_to_behaviours!(self, inject_listener_closed, id, reason); - } - - fn inject_event( - &mut self, - peer_id: PeerId, - conn_id: ConnectionId, - event: ::OutEvent, - ) { - // If the peer is not supposed to be connected (undergoing active disconnection, - // don't process any of its messages. - if !self.network_globals.peers.read().is_connected(&peer_id) { - return; - } - - // Events comming from the handler, redirected to each behaviour - match event { - DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev), - DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev), - DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, *ev), - } - } - - fn poll( - &mut self, - cx: &mut Context, - poll_params: &mut impl PollParameters, - ) -> Poll::InEvent, Self::OutEvent>> { - // update the waker if needed - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - macro_rules! poll_behaviour { - /* $behaviour: The sub-behaviour being polled. - * $on_event_fn: Function to call if we get an event from the sub-behaviour. - * $notify_handler_event_closure: Closure mapping the received event type to - * the one that the handler should get. 
- */ - ($behaviour: ident, $on_event_fn: ident, $notify_handler_event_closure: expr) => { - loop { - // poll the sub-behaviour - match self.$behaviour.poll(cx, poll_params) { - Poll::Ready(action) => match action { - // call the designated function to handle the event from sub-behaviour - NBAction::GenerateEvent(event) => self.$on_event_fn(event), - NBAction::DialAddress { address } => { - return Poll::Ready(NBAction::DialAddress { address }) - } - NBAction::DialPeer { peer_id, condition } => { - return Poll::Ready(NBAction::DialPeer { peer_id, condition }) - } - NBAction::NotifyHandler { - peer_id, - handler, - event, - } => { - return Poll::Ready(NBAction::NotifyHandler { - peer_id, - handler, - // call the closure mapping the received event to the needed one - // in order to notify the handler - event: BehaviourHandlerIn::Delegate( - $notify_handler_event_closure(event), - ), - }); - } - NBAction::ReportObservedAddr { address, score } => { - return Poll::Ready(NBAction::ReportObservedAddr { address, score }) - } - }, - Poll::Pending => break, - } - } - }; - } - - poll_behaviour!(gossipsub, on_gossip_event, DelegateIn::Gossipsub); - poll_behaviour!(eth2_rpc, on_rpc_event, DelegateIn::RPC); - poll_behaviour!(identify, on_identify_event, DelegateIn::Identify); - - self.custom_poll(cx) - } } /* Public API types */ diff --git a/beacon_node/eth2_libp2p/src/config.rs b/beacon_node/eth2_libp2p/src/config.rs index 8b1979b4a27..5add5fdf9aa 100644 --- a/beacon_node/eth2_libp2p/src/config.rs +++ b/beacon_node/eth2_libp2p/src/config.rs @@ -14,13 +14,15 @@ use sha2::{Digest, Sha256}; use std::path::PathBuf; use std::time::Duration; +/// The maximum transmit size of gossip messages in bytes. pub const GOSSIP_MAX_SIZE: usize = 1_048_576; +/// This is a constant to be used in discovery. The lower bound of the gossipsub mesh. +pub const MESH_N_LOW: usize = 6; // We treat uncompressed messages as invalid and never use the INVALID_SNAPPY_DOMAIN as in the // specification. We leave it here for posterity. 
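
Both the removed delegating `poll` above and its replacement earlier in this diff rely on the same stored-waker idiom: capture the task's `Waker` inside `poll`, then wake that task whenever an event is queued from outside the poll loop (as `add_event` does). A stand-alone sketch of the idiom, with illustrative names rather than Lighthouse API:

    use std::collections::VecDeque;
    use std::task::{Context, Poll, Waker};

    struct EventQueue<E> {
        events: VecDeque<E>,
        waker: Option<Waker>,
    }

    impl<E> EventQueue<E> {
        /// Queue an event and wake the task that last polled us, if any.
        fn push(&mut self, event: E) {
            self.events.push_back(event);
            if let Some(waker) = &self.waker {
                waker.wake_by_ref();
            }
        }

        /// Pop the next event, remembering the current task so `push` can wake it.
        fn poll_next(&mut self, cx: &mut Context) -> Poll<E> {
            self.waker = Some(cx.waker().clone());
            match self.events.pop_front() {
                Some(event) => Poll::Ready(event),
                None => Poll::Pending,
            }
        }
    }
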
// const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0]; const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0]; -pub const MESH_N_LOW: usize = 6; #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] @@ -138,8 +140,8 @@ impl Default for Config { .mesh_n_high(12) .gossip_lazy(6) .fanout_ttl(Duration::from_secs(60)) - .history_length(6) - .max_messages_per_rpc(Some(10)) + .history_length(12) + .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large .history_gossip(3) .validate_messages() // require validation before propagation .validation_mode(ValidationMode::Anonymous) @@ -151,10 +153,20 @@ impl Default for Config { .build() .expect("valid gossipsub configuration"); + // Discv5 Unsolicited Packet Rate Limiter + let filter_rate_limiter = Some( + discv5::RateLimiterBuilder::new() + .total_n_every(10, Duration::from_secs(1)) // Allow bursts, average 10 per second + .ip_n_every(9, Duration::from_secs(1)) // Allow bursts, average 9 per second + .node_n_every(8, Duration::from_secs(1)) // Allow bursts, average 8 per second + .build() + .expect("The total rate limit has been specified"), + ); + // discv5 configuration let discv5_config = Discv5ConfigBuilder::new() .enable_packet_filter() - .session_cache_capacity(1000) + .session_cache_capacity(5000) .request_timeout(Duration::from_secs(1)) .query_peer_timeout(Duration::from_secs(2)) .query_timeout(Duration::from_secs(30)) @@ -163,6 +175,11 @@ impl Default for Config { .query_parallelism(5) .disable_report_discovered_peers() .ip_limit() // limits /24 IP's in buckets. + .incoming_bucket_limit(8) // half the bucket size + .filter_rate_limiter(filter_rate_limiter) + .filter_max_bans_per_ip(Some(5)) + .filter_max_nodes_per_ip(Some(10)) + .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); diff --git a/beacon_node/eth2_libp2p/src/discovery/enr.rs b/beacon_node/eth2_libp2p/src/discovery/enr.rs index 534d79c51ed..a8f05863626 100644 --- a/beacon_node/eth2_libp2p/src/discovery/enr.rs +++ b/beacon_node/eth2_libp2p/src/discovery/enr.rs @@ -67,7 +67,7 @@ pub fn use_or_load_enr( Ok(disk_enr) => { // if the same node id, then we may need to update our sequence number if local_enr.node_id() == disk_enr.node_id() { - if compare_enr(&local_enr, &disk_enr) { + if compare_enr(local_enr, &disk_enr) { debug!(log, "ENR loaded from disk"; "file" => ?enr_f); // the stored ENR has the same configuration, use it *local_enr = disk_enr; @@ -92,7 +92,7 @@ pub fn use_or_load_enr( } } - save_enr_to_disk(&config.network_dir, &local_enr, log); + save_enr_to_disk(&config.network_dir, local_enr, log); Ok(()) } @@ -193,7 +193,7 @@ pub fn load_enr_from_disk(dir: &Path) -> Result { pub fn save_enr_to_disk(dir: &Path, enr: &Enr, log: &slog::Logger) { let _ = std::fs::create_dir_all(dir); match File::create(dir.join(Path::new(ENR_FILENAME))) - .and_then(|mut f| f.write_all(&enr.to_base64().as_bytes())) + .and_then(|mut f| f.write_all(enr.to_base64().as_bytes())) { Ok(_) => { debug!(log, "ENR written to disk"); diff --git a/beacon_node/eth2_libp2p/src/discovery/enr_ext.rs b/beacon_node/eth2_libp2p/src/discovery/enr_ext.rs index a6f718abb2c..164318cae28 100644 --- a/beacon_node/eth2_libp2p/src/discovery/enr_ext.rs +++ b/beacon_node/eth2_libp2p/src/discovery/enr_ext.rs @@ -254,7 +254,7 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result Discovery { let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port); // convert the keypair into an ENR key - let enr_key: CombinedKey = 
CombinedKey::from_libp2p(&local_key)?; + let enr_key: CombinedKey = CombinedKey::from_libp2p(local_key)?; let mut discv5 = Discv5::new(local_enr, enr_key, config.discv5_config.clone()) .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?; @@ -295,6 +307,11 @@ impl Discovery { self.cached_enrs.iter() } + /// Removes a cached ENR from the list. + pub fn remove_cached_enr(&mut self, peer_id: &PeerId) -> Option { + self.cached_enrs.pop(peer_id) + } + /// This adds a new `FindPeers` query to the queue if one doesn't already exist. pub fn discover_peers(&mut self) { // If the discv5 service isn't running or we are in the process of a query, don't bother queuing a new one. @@ -492,33 +509,38 @@ impl Discovery { // first try and convert the peer_id to a node_id. if let Ok(node_id) = peer_id_to_node_id(peer_id) { // If we could convert this peer id, remove it from the DHT and ban it from discovery. - self.discv5.ban_node(&node_id); + self.discv5.ban_node(&node_id, None); // Remove the node from the routing table. self.discv5.remove_node(&node_id); } for ip_address in ip_addresses { - self.discv5.ban_ip(ip_address); + self.discv5.ban_ip(ip_address, None); } } + /// Unbans the peer in discovery. pub fn unban_peer(&mut self, peer_id: &PeerId, ip_addresses: Vec) { // first try and convert the peer_id to a node_id. if let Ok(node_id) = peer_id_to_node_id(peer_id) { // If we could convert this peer id, remove it from the DHT and ban it from discovery. - self.discv5.permit_node(&node_id); + self.discv5.ban_node_remove(&node_id); } for ip_address in ip_addresses { - self.discv5.permit_ip(ip_address); + self.discv5.ban_ip_remove(&ip_address); } } - // mark node as disconnected in DHT, freeing up space for other nodes + /// Marks node as disconnected in the DHT, freeing up space for other nodes, this also removes + /// nodes from the cached ENR list. pub fn disconnect_peer(&mut self, peer_id: &PeerId) { if let Ok(node_id) = peer_id_to_node_id(peer_id) { self.discv5.disconnect_node(&node_id); } + // Remove the peer from the cached list, to prevent redialing disconnected + // peers. + self.cached_enrs.pop(peer_id); } /* Internal Functions */ @@ -727,7 +749,11 @@ impl Discovery { }; // predicate for finding nodes with a matching fork and valid tcp port let eth2_fork_predicate = move |enr: &Enr| { - enr.eth2() == Ok(enr_fork_id.clone()) && (enr.tcp().is_some() || enr.tcp6().is_some()) + // `next_fork_epoch` and `next_fork_version` can be different so that + // we can connect to peers who aren't compatible with an upcoming fork. + // `fork_digest` **must** be same. + enr.eth2().map(|e| e.fork_digest) == Ok(enr_fork_id.fork_digest) + && (enr.tcp().is_some() || enr.tcp6().is_some()) }; // General predicate @@ -871,9 +897,68 @@ impl Discovery { } None } +} + +/* NetworkBehaviour Implementation */ + +impl NetworkBehaviour for Discovery { + // Discovery is not a real NetworkBehaviour... + type ProtocolsHandler = libp2p::swarm::protocols_handler::DummyProtocolsHandler; + type OutEvent = DiscoveryEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + libp2p::swarm::protocols_handler::DummyProtocolsHandler::default() + } - // Main execution loop to be driven by the peer manager. - pub fn poll(&mut self, cx: &mut Context) -> Poll { + // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + if let Some(enr) = self.enr_of_peer(peer_id) { + // ENR's may have multiple Multiaddrs. 
The multi-addr associated with the UDP + // port is removed, which is assumed to be associated with the discv5 protocol (and + // therefore irrelevant for other libp2p components). + enr.multiaddr_tcp() + } else { + // PeerId is not known + Vec::new() + } + } + + fn inject_connected(&mut self, _peer_id: &PeerId) {} + fn inject_disconnected(&mut self, _peer_id: &PeerId) {} + fn inject_connection_established( + &mut self, + _: &PeerId, + _: &ConnectionId, + _connected_point: &ConnectedPoint, + ) { + } + fn inject_connection_closed( + &mut self, + _: &PeerId, + _: &ConnectionId, + _connected_point: &ConnectedPoint, + ) { + } + fn inject_event( + &mut self, + _: PeerId, + _: ConnectionId, + _: ::OutEvent, + ) { + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + // set peer as disconnected in discovery DHT + debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); + self.disconnect_peer(peer_id); + } + + // Main execution loop to drive the behaviour + fn poll( + &mut self, + cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll::InEvent, Self::OutEvent>> { if !self.started { return Poll::Pending; } @@ -884,7 +969,9 @@ impl Discovery { // Drive the queries and return any results from completed queries if let Some(results) = self.poll_queries(cx) { // return the result to the peer manager - return Poll::Ready(DiscoveryEvent::QueryResult(results)); + return Poll::Ready(NBAction::GenerateEvent(DiscoveryEvent::QueryResult( + results, + ))); } // Process the server event stream @@ -932,9 +1019,13 @@ impl Discovery { enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log); // update network globals *self.network_globals.local_enr.write() = enr; - return Poll::Ready(DiscoveryEvent::SocketUpdated(socket)); + return Poll::Ready(NBAction::GenerateEvent( + DiscoveryEvent::SocketUpdated(socket), + )); } - _ => {} // Ignore all other discv5 server events + Discv5Event::EnrAdded { .. } + | Discv5Event::TalkRequest(_) + | Discv5Event::NodeInserted { .. } => {} // Ignore all other discv5 server events } } } diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index 5f87015a0ed..26234a93fde 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -1,20 +1,19 @@ -//! Implementation of a Lighthouse's peer management system. +//! Implementation of Lighthouse's peer management system. pub use self::peerdb::*; -use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS}; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::types::SyncState; use crate::{error, metrics, Gossipsub}; -use crate::{EnrExt, NetworkConfig, NetworkGlobals, PeerId, SubnetDiscovery}; +use crate::{NetworkConfig, NetworkGlobals, PeerId}; +use discv5::Enr; use futures::prelude::*; use futures::Stream; use hashset_delay::HashSetDelay; -use libp2p::core::multiaddr::Protocol as MProtocol; +use libp2p::core::ConnectedPoint; use libp2p::identify::IdentifyInfo; -use slog::{crit, debug, error, trace, warn}; +use slog::{crit, debug, error, warn}; use smallvec::SmallVec; use std::{ - net::SocketAddr, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -36,6 +35,7 @@ pub use peer_sync_status::{PeerSyncStatus, SyncInfo}; use score::{PeerAction, ReportSource, ScoreState}; use std::cmp::Ordering; use std::collections::HashMap; +use std::net::IpAddr; /// The time in seconds between re-status's peers. 
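
The `NetworkBehaviour` implementation for `Discovery` above is deliberately hollow: a `DummyProtocolsHandler` never negotiates substreams, so discv5 keeps all of its traffic on its own UDP socket while still being polled by the swarm. A skeleton of the same trick, assuming the libp2p 0.39-era trait signatures (the `Noop` type is hypothetical):

    use std::task::{Context, Poll};

    use libp2p::core::connection::ConnectionId;
    use libp2p::swarm::protocols_handler::DummyProtocolsHandler;
    use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
    use libp2p::{Multiaddr, PeerId};
    use void::Void; // the dummy handler's event types are uninhabited

    struct Noop;

    impl NetworkBehaviour for Noop {
        type ProtocolsHandler = DummyProtocolsHandler;
        type OutEvent = ();

        fn new_handler(&mut self) -> Self::ProtocolsHandler {
            DummyProtocolsHandler::default()
        }

        // No substreams are ever opened, so the connection callbacks are inert.
        fn addresses_of_peer(&mut self, _: &PeerId) -> Vec<Multiaddr> {
            Vec::new()
        }
        fn inject_connected(&mut self, _: &PeerId) {}
        fn inject_disconnected(&mut self, _: &PeerId) {}
        fn inject_event(&mut self, _: PeerId, _: ConnectionId, event: Void) {
            void::unreachable(event)
        }

        fn poll(
            &mut self,
            _: &mut Context,
            _: &mut impl PollParameters,
        ) -> Poll<NetworkBehaviourAction<Void, ()>> {
            // A real implementation (like Discovery above) drives its own I/O
            // here and surfaces results via NetworkBehaviourAction::GenerateEvent.
            Poll::Pending
        }
    }
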
const STATUS_INTERVAL: u64 = 300; @@ -54,15 +54,14 @@ const HEARTBEAT_INTERVAL: u64 = 30; /// A fraction of `PeerManager::target_peers` that we allow to connect to us in excess of /// `PeerManager::target_peers`. For clarity, if `PeerManager::target_peers` is 50 and /// PEER_EXCESS_FACTOR = 0.1 we allow 10% more nodes, i.e 55. -const PEER_EXCESS_FACTOR: f32 = 0.1; +pub const PEER_EXCESS_FACTOR: f32 = 0.1; +/// A fraction of `PeerManager::target_peers` that need to be outbound-only connections. +pub const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.1; /// Relative factor of peers that are allowed to have a negative gossipsub score without penalizing /// them in lighthouse. const ALLOWED_NEGATIVE_GOSSIPSUB_FACTOR: f32 = 0.1; -/// A fraction of `PeerManager::target_peers` that need to be outbound-only connections. -const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.1; - /// The main struct that handles peer's reputation and connection status. pub struct PeerManager { /// Storage of network globals to access the `PeerDB`. @@ -79,20 +78,22 @@ pub struct PeerManager { target_peers: usize, /// The maximum number of peers we allow (exceptions for subnet peers) max_peers: usize, - /// The discovery service. - discovery: Discovery, /// The heartbeat interval to perform routine maintenance. heartbeat: tokio::time::Interval, + /// Keeps track of whether the discovery service is enabled or not. + discovery_enabled: bool, /// The logger associated with the `PeerManager`. log: slog::Logger, } /// The events that the `PeerManager` outputs (requests). pub enum PeerManagerEvent { - /// Dial a PeerId. - Dial(PeerId), - /// Inform libp2p that our external socket addr has been updated. - SocketUpdated(Multiaddr), + /// A peer has dialed us. + PeerConnectedIncoming(PeerId), + /// A peer has been dialed. + PeerConnectedOutgoing(PeerId), + /// A peer has disconnected. + PeerDisconnected(PeerId), /// Sends a STATUS to a peer. Status(PeerId), /// Sends a PING to a peer. @@ -101,22 +102,22 @@ pub enum PeerManagerEvent { MetaData(PeerId), /// The peer should be disconnected. DisconnectPeer(PeerId, GoodbyeReason), + /// Inform the behaviour to ban this peer and associated ip addresses. + Banned(PeerId, Vec), + /// The peer should be unbanned with the associated ip addresses. + UnBanned(PeerId, Vec), + /// Request the behaviour to discover more peers. + DiscoverPeers, } impl PeerManager { // NOTE: Must be run inside a tokio executor. pub async fn new( - local_key: &Keypair, config: &NetworkConfig, network_globals: Arc>, log: &slog::Logger, ) -> error::Result { - // start the discovery service - let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log).await?; - - // start searching for peers - discovery.discover_peers(); - + // Set up the peer manager heartbeat interval let heartbeat = tokio::time::interval(tokio::time::Duration::from_secs(HEARTBEAT_INTERVAL)); Ok(PeerManager { @@ -127,22 +128,14 @@ impl PeerManager { status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)), target_peers: config.target_peers, max_peers: (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize, - discovery, heartbeat, + discovery_enabled: !config.disable_discovery, log: log.clone(), }) } /* Public accessible functions */ - /// Attempts to connect to a peer. - /// - /// Returns true if the peer was accepted into the database. 
- pub fn dial_peer(&mut self, peer_id: &PeerId) -> bool { - self.events.push(PeerManagerEvent::Dial(*peer_id)); - self.connect_peer(peer_id, ConnectingType::Dialing) - } - /// The application layer wants to disconnect from a peer for a particular reason. /// /// All instant disconnections are fatal and we ban the associated peer. @@ -217,66 +210,52 @@ impl PeerManager { self.ban_and_unban_peers(to_ban_peers, to_unban_peers); } - /* Discovery Requests */ - - /// Provides a reference to the underlying discovery service. - pub fn discovery(&self) -> &Discovery { - &self.discovery - } - - /// Provides a mutable reference to the underlying discovery service. - pub fn discovery_mut(&mut self) -> &mut Discovery { - &mut self.discovery - } - - /// A request to find peers on a given subnet. - pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { - // If discovery is not started or disabled, ignore the request - if !self.discovery.started { - return; - } + /// Peers that have been returned by discovery requests that are suitable for dialing are + /// returned here. + /// + /// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated + /// with a new `PeerId` which involves a discovery routing table lookup. We could dial the + /// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup + /// proves resource constraining, we should switch to multiaddr dialling here. + #[allow(clippy::mutable_key_type)] + pub fn peers_discovered(&mut self, results: HashMap>) -> Vec { + let mut to_dial_peers = Vec::new(); - let filtered: Vec = subnets_to_discover - .into_iter() - .filter(|s| { - // Extend min_ttl of connected peers on required subnets - if let Some(min_ttl) = s.min_ttl { + let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); + for (peer_id, min_ttl) in results { + // we attempt a connection if this peer is a subnet peer or if the max peer count + // is not yet filled (including dialing peers) + if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers) + && self.network_globals.peers.read().should_dial(&peer_id) + { + // This should be updated with the peer dialing. In fact created once the peer is + // dialed + if let Some(min_ttl) = min_ttl { self.network_globals .peers .write() - .extend_peers_on_subnet(s.subnet_id, min_ttl); - } - // Already have target number of peers, no need for subnet discovery - let peers_on_subnet = self - .network_globals - .peers - .read() - .good_peers_on_subnet(s.subnet_id) - .count(); - if peers_on_subnet >= TARGET_SUBNET_PEERS { - trace!( - self.log, - "Discovery query ignored"; - "subnet_id" => ?s.subnet_id, - "reason" => "Already connected to desired peers", - "connected_peers_on_subnet" => peers_on_subnet, - "target_subnet_peers" => TARGET_SUBNET_PEERS, - ); - false - // Queue an outgoing connection request to the cached peers that are on `s.subnet_id`. - // If we connect to the cached peers before the discovery query starts, then we potentially - // save a costly discovery query. 
- } else { - self.dial_cached_enrs_in_subnet(s.subnet_id); - true + .update_min_ttl(&peer_id, min_ttl); } - }) - .collect(); + to_dial_peers.push(peer_id); + } + } + + // Queue another discovery if we need to + let peer_count = self.network_globals.connected_or_dialing_peers(); + let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); + let min_outbound_only_target = + (self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize; - // request the subnet query from discovery - if !filtered.is_empty() { - self.discovery.discover_subnet_peers(filtered); + if self.discovery_enabled + && (peer_count < self.target_peers.saturating_sub(to_dial_peers.len()) + || outbound_only_peer_count < min_outbound_only_target) + { + // We need more peers, re-queue a discovery lookup. + debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers); + self.events.push(PeerManagerEvent::DiscoverPeers); } + + to_dial_peers } /// A STATUS message has been received from a peer. This resets the status timer. @@ -307,19 +286,144 @@ impl PeerManager { /* Notifications from the Swarm */ - /// Updates the state of the peer as disconnected. - /// - /// This is also called when dialing a peer fails. - pub fn notify_disconnect(&mut self, peer_id: &PeerId) { - self.network_globals - .peers - .write() - .notify_disconnect(peer_id); + // A peer is being dialed. + pub fn inject_dialing(&mut self, peer_id: &PeerId, enr: Option) { + self.inject_peer_connection(peer_id, ConnectingType::Dialing, enr); + } - // remove the ping and status timer for the peer - self.inbound_ping_peers.remove(peer_id); - self.outbound_ping_peers.remove(peer_id); - self.status_peers.remove(peer_id); + pub fn inject_connection_established( + &mut self, + peer_id: PeerId, + endpoint: ConnectedPoint, + num_established: std::num::NonZeroU32, + enr: Option, + ) { + // Log the connection + match &endpoint { + ConnectedPoint::Listener { .. } => { + debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Incoming", "connections" => %num_established); + } + ConnectedPoint::Dialer { .. } => { + debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Outgoing", "connections" => %num_established); + } + } + + // Should not be able to connect to a banned peer. Double check here + if self.is_banned(&peer_id) { + warn!(self.log, "Connected to a banned peer"; "peer_id" => %peer_id); + self.events.push(PeerManagerEvent::DisconnectPeer( + peer_id, + GoodbyeReason::Banned, + )); + self.network_globals + .peers + .write() + .notify_disconnecting(peer_id, true); + return; + } + + // Check the connection limits + if self.peer_limit_reached() + && self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map_or(true, |peer| !peer.has_future_duty()) + { + self.events.push(PeerManagerEvent::DisconnectPeer( + peer_id, + GoodbyeReason::TooManyPeers, + )); + self.network_globals + .peers + .write() + .notify_disconnecting(peer_id, false); + return; + } + + // Register the newly connected peer (regardless if we are about to disconnect them). + // NOTE: We don't register peers that we are disconnecting immediately. The network service + // does not need to know about these peers. + match endpoint { + ConnectedPoint::Listener { send_back_addr, .. 
} => { + self.inject_connect_ingoing(&peer_id, send_back_addr, enr); + if num_established == std::num::NonZeroU32::new(1).expect("valid") { + self.events + .push(PeerManagerEvent::PeerConnectedIncoming(peer_id)); + } + } + ConnectedPoint::Dialer { address } => { + self.inject_connect_outgoing(&peer_id, address, enr); + if num_established == std::num::NonZeroU32::new(1).expect("valid") { + self.events + .push(PeerManagerEvent::PeerConnectedOutgoing(peer_id)); + } + } + } + + // increment prometheus metrics + metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); + metrics::set_gauge( + &metrics::PEERS_CONNECTED, + self.network_globals.connected_peers() as i64, + ); + } + + pub fn inject_connection_closed( + &mut self, + peer_id: PeerId, + _endpoint: ConnectedPoint, + num_established: u32, + ) { + if num_established == 0 { + // There are no more connections + + // Remove all subnet subscriptions from the peer_db + self.remove_all_subscriptions(&peer_id); + + if self + .network_globals + .peers + .read() + .is_connected_or_disconnecting(&peer_id) + { + // We are disconnecting the peer or the peer has already been connected. + // In both of these cases, the peer has been previously registered by the peer manager and + // potentially the application layer. + // Inform the application. + self.events + .push(PeerManagerEvent::PeerDisconnected(peer_id)); + debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); + + // Decrement the PEERS_PER_CLIENT metric + if let Some(kind) = self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map(|info| info.client.kind.clone()) + { + if let Some(v) = + metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) + { + v.dec() + }; + } + } + + // NOTE: It may be the case that a rejected node, due to too many peers, is disconnected + // here and the peer manager has no knowledge of its connection. We insert it here for + // reference so that the peer manager can track this peer. + self.inject_disconnect(&peer_id); + + // Update the prometheus metrics + metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); + metrics::set_gauge( + &metrics::PEERS_CONNECTED, + self.network_globals.connected_peers() as i64, + ); + } } /// A dial attempt has failed. @@ -327,27 +431,12 @@ impl<TSpec: EthSpec> PeerManager<TSpec> { /// NOTE: It can be the case that we are dialing a peer and during the dialing process the peer /// connects and the dial attempt later fails. To handle this, we only update the peer_db if /// the peer is not already connected. - pub fn notify_dial_failure(&mut self, peer_id: &PeerId) { + pub fn inject_dial_failure(&mut self, peer_id: &PeerId) { if !self.network_globals.peers.read().is_connected(peer_id) { - self.notify_disconnect(peer_id); - // set peer as disconnected in discovery DHT - debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); - self.discovery.disconnect_peer(peer_id); + self.inject_disconnect(peer_id); } } - /// Sets a peer as connected as long as their reputation allows it - /// Informs if the peer was accepted - pub fn connect_ingoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) -> bool { - self.connect_peer(peer_id, ConnectingType::IngoingConnected { multiaddr }) - } - - /// Sets a peer as connected as long as their reputation allows it - /// Informs if the peer was accepted - pub fn connect_outgoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) -> bool { - self.connect_peer(peer_id, ConnectingType::OutgoingConnected { multiaddr }) - } - /// Reports if a peer is banned or not.
/// /// This is used to determine if we should accept incoming connections. @@ -483,6 +572,7 @@ impl PeerManager { }, }, RPCError::NegotiationTimeout => PeerAction::LowToleranceError, + RPCError::Disconnected => return, // No penalty for a graceful disconnection }; self.report_peer(peer_id, peer_action, ReportSource::RPC); @@ -574,22 +664,6 @@ impl PeerManager { } } - // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. - pub fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - if let Some(enr) = self.discovery.enr_of_peer(peer_id) { - // ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP - // port is removed, which is assumed to be associated with the discv5 protocol (and - // therefore irrelevant for other libp2p components). - let mut out_list = enr.multiaddr(); - out_list.retain(|addr| !addr.iter().any(|v| matches!(v, MProtocol::Udp(_)))); - - out_list - } else { - // PeerId is not known - Vec::new() - } - } - pub(crate) fn update_gossipsub_scores(&mut self, gossipsub: &Gossipsub) { let mut to_ban_peers = Vec::new(); let mut to_unban_peers = Vec::new(); @@ -645,71 +719,49 @@ impl PeerManager { /* Internal functions */ - // The underlying discovery server has updated our external IP address. We send this up to - // notify libp2p. - fn socket_updated(&mut self, socket: SocketAddr) { - // Build a multiaddr to report to libp2p - let mut multiaddr = Multiaddr::from(socket.ip()); - // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling - // should handle this. - multiaddr.push(MProtocol::Tcp(self.network_globals.listen_port_tcp())); - self.events.push(PeerManagerEvent::SocketUpdated(multiaddr)); + /// Sets a peer as connected as long as their reputation allows it + /// Informs if the peer was accepted + fn inject_connect_ingoing( + &mut self, + peer_id: &PeerId, + multiaddr: Multiaddr, + enr: Option, + ) -> bool { + self.inject_peer_connection(peer_id, ConnectingType::IngoingConnected { multiaddr }, enr) } - /// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't - /// in Connected, Dialing or Banned state. - fn dial_cached_enrs_in_subnet(&mut self, subnet_id: SubnetId) { - let predicate = subnet_predicate::(vec![subnet_id], &self.log); - let peers_to_dial: Vec = self - .discovery() - .cached_enrs() - .filter_map(|(peer_id, enr)| { - let peers = self.network_globals.peers.read(); - if predicate(enr) && peers.should_dial(peer_id) { - Some(*peer_id) - } else { - None - } - }) - .collect(); - for peer_id in &peers_to_dial { - debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); - self.dial_peer(peer_id); - } + /// Sets a peer as connected as long as their reputation allows it + /// Informs if the peer was accepted + fn inject_connect_outgoing( + &mut self, + peer_id: &PeerId, + multiaddr: Multiaddr, + enr: Option, + ) -> bool { + self.inject_peer_connection( + peer_id, + ConnectingType::OutgoingConnected { multiaddr }, + enr, + ) } - /// Peers that have been returned by discovery requests are dialed here if they are suitable. + /// Updates the state of the peer as disconnected. /// - /// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated - /// with a new `PeerId` which involves a discovery routing table lookup. We could dial the - /// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup - /// proves resource constraining, we should switch to multiaddr dialling here. 
- #[allow(clippy::mutable_key_type)] - fn peers_discovered(&mut self, results: HashMap>) { - let mut to_dial_peers = Vec::new(); - - let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); - for (peer_id, min_ttl) in results { - // we attempt a connection if this peer is a subnet peer or if the max peer count - // is not yet filled (including dialing peers) - if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers) - && self.network_globals.peers.read().should_dial(&peer_id) - { - // This should be updated with the peer dialing. In fact created once the peer is - // dialed - if let Some(min_ttl) = min_ttl { - self.network_globals - .peers - .write() - .update_min_ttl(&peer_id, min_ttl); - } - to_dial_peers.push(peer_id); - } - } - for peer_id in to_dial_peers { - debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); - self.dial_peer(&peer_id); + /// This is also called when dialing a peer fails. + fn inject_disconnect(&mut self, peer_id: &PeerId) { + if self + .network_globals + .peers + .write() + .inject_disconnect(peer_id) + { + self.ban_peer(peer_id); } + + // remove the ping and status timer for the peer + self.inbound_ping_peers.remove(peer_id); + self.outbound_ping_peers.remove(peer_id); + self.status_peers.remove(peer_id); } /// Registers a peer as connected. The `ingoing` parameter determines if the peer is being @@ -718,16 +770,19 @@ impl PeerManager { /// This is called by `connect_ingoing` and `connect_outgoing`. /// /// Informs if the peer was accepted in to the db or not. - fn connect_peer(&mut self, peer_id: &PeerId, connection: ConnectingType) -> bool { + fn inject_peer_connection( + &mut self, + peer_id: &PeerId, + connection: ConnectingType, + enr: Option, + ) -> bool { { let mut peerdb = self.network_globals.peers.write(); - if peerdb.is_banned(&peer_id) { + if peerdb.is_banned(peer_id) { // don't connect if the peer is banned slog::crit!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id); } - let enr = self.discovery.enr_of_peer(peer_id); - match connection { ConnectingType::Dialing => { peerdb.dialing_peer(peer_id, enr); @@ -774,6 +829,8 @@ impl PeerManager { true } + /// This handles score transitions between states. It transitions peers states from + /// disconnected/banned/connected. fn handle_score_transitions( previous_state: ScoreState, peer_id: &PeerId, @@ -814,6 +871,7 @@ impl PeerManager { } } + /// Updates the state of banned peers. fn ban_and_unban_peers(&mut self, to_ban_peers: Vec, to_unban_peers: Vec) { // process banning peers for peer_id in to_ban_peers { @@ -883,7 +941,9 @@ impl PeerManager { }) .unwrap_or_default(); - self.discovery.ban_peer(&peer_id, banned_ip_addresses); + // Inform the Swarm to ban the peer + self.events + .push(PeerManagerEvent::Banned(*peer_id, banned_ip_addresses)); } /// Unbans a peer. @@ -892,14 +952,16 @@ impl PeerManager { /// previous bans from discovery. 
fn unban_peer(&mut self, peer_id: &PeerId) -> Result<(), &'static str> { let mut peer_db = self.network_globals.peers.write(); - peer_db.unban(&peer_id)?; + peer_db.unban(peer_id)?; let seen_ip_addresses = peer_db .peer_info(peer_id) .map(|info| info.seen_addresses().collect::>()) .unwrap_or_default(); - self.discovery.unban_peer(&peer_id, seen_ip_addresses); + // Inform the Swarm to unban the peer + self.events + .push(PeerManagerEvent::UnBanned(*peer_id, seen_ip_addresses)); Ok(()) } @@ -915,12 +977,13 @@ impl PeerManager { let min_outbound_only_target = (self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize; - if peer_count < self.target_peers || outbound_only_peer_count < min_outbound_only_target { + if self.discovery_enabled + && (peer_count < self.target_peers + || outbound_only_peer_count < min_outbound_only_target) + { // If we need more peers, queue a discovery lookup. - if self.discovery.started { - debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers); - self.discovery.discover_peers(); - } + debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers); + self.events.push(PeerManagerEvent::DiscoverPeers); } // Updates peer's scores. @@ -959,7 +1022,7 @@ impl PeerManager { let mut peer_db = self.network_globals.peers.write(); for peer_id in disconnecting_peers { - peer_db.notify_disconnecting(&peer_id); + peer_db.notify_disconnecting(peer_id, false); self.events.push(PeerManagerEvent::DisconnectPeer( peer_id, GoodbyeReason::TooManyPeers, @@ -977,14 +1040,6 @@ impl Stream for PeerManager { self.heartbeat(); } - // handle any discovery events - while let Poll::Ready(event) = self.discovery.poll(cx) { - match event { - DiscoveryEvent::SocketUpdated(socket_addr) => self.socket_updated(socket_addr), - DiscoveryEvent::QueryResult(results) => self.peers_discovered(results), - } - } - // poll the timeouts for pings and status' loop { match self.inbound_ping_peers.poll_next_unpin(cx) { @@ -1108,7 +1163,7 @@ mod tests { vec![], &log, ); - PeerManager::new(&keypair, &config, Arc::new(globals), &log) + PeerManager::new(&config, Arc::new(globals), &log) .await .unwrap() } @@ -1125,11 +1180,19 @@ mod tests { let outbound_only_peer1 = PeerId::random(); let outbound_only_peer2 = PeerId::random(); - peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer2, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_outgoing( + &outbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + peer_manager.inject_connect_outgoing( + &outbound_only_peer2, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); // Set the outbound-only peers to have the lowest score. peer_manager @@ -1181,13 +1244,17 @@ mod tests { // Connect to 20 ingoing-only peers. 
for _i in 0..19 { let peer = PeerId::random(); - peer_manager.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); } // Connect an outbound-only peer. // Give it the lowest score so that it is evaluated first in the disconnect list iterator. let outbound_only_peer = PeerId::random(); - peer_manager.connect_ingoing(&outbound_only_peer, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing( + &outbound_only_peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); peer_manager .network_globals .peers @@ -1213,12 +1280,20 @@ mod tests { let inbound_only_peer1 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); - peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_outgoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); // Connect to two peers that are on the threshold of being disconnected. - peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing( + &inbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + peer_manager.inject_connect_outgoing( + &outbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); peer_manager .network_globals .peers @@ -1268,12 +1343,20 @@ mod tests { let inbound_only_peer1 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); - peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); // Connect to two peers that are on the threshold of being disconnected. - peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing( + &inbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + peer_manager.inject_connect_outgoing( + &outbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); peer_manager .network_globals .peers @@ -1320,12 +1403,20 @@ mod tests { let inbound_only_peer1 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); - peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_outgoing( + &outbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); // Have one peer be on the verge of disconnection. 
- peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing( + &inbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); peer_manager .network_globals .peers diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs index 43570c5aeee..c9eeae94726 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs @@ -198,25 +198,16 @@ impl<T: EthSpec> PeerInfo<T> { // Setters /// Modifies the status to Disconnected and sets the last seen instant to now. Returns None if - /// no changes were made. Returns Some(bool) where the bool represents if peer became banned or - /// simply just disconnected. + /// no changes were made. Returns Some(bool) where the bool represents if the peer is now to be + /// banned. pub fn notify_disconnect(&mut self) -> Option<bool> { match self.connection_status { Banned { .. } | Disconnected { .. } => None, Disconnecting { to_ban } => { - // If we are disconnecting this peer in the process of banning, we now ban the - // peer. - if to_ban { - self.connection_status = Banned { - since: Instant::now(), - }; - Some(true) - } else { - self.connection_status = Disconnected { - since: Instant::now(), - }; - Some(false) - } + self.connection_status = Disconnected { + since: Instant::now(), + }; + Some(to_ban) } Connected { .. } | Dialing { .. } | Unknown => { self.connection_status = Disconnected { @@ -227,11 +218,8 @@ impl<T: EthSpec> PeerInfo<T> { } } - /// Notify the we are currently disconnecting this peer, after which the peer will be - /// considered banned. - // This intermediate state is required to inform the network behaviours that the sub-protocols - // are aware this peer exists and it is in the process of being banned. Compared to nodes that - // try to connect to us and are already banned (sub protocols do not know of these peers). + /// Notify that we are currently disconnecting this peer. Optionally ban the peer after the + /// disconnect. pub fn disconnecting(&mut self, to_ban: bool) { self.connection_status = Disconnecting { to_ban } } diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs index e96ca3a81d0..438980b9ee0 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs @@ -319,7 +319,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> { let mut by_status = self .peers .iter() - .filter(|(_, info)| is_status(&info)) + .filter(|(_, info)| is_status(info)) .collect::<Vec<_>>(); by_status.sort_by_key(|(_, info)| info.score()); by_status.into_iter().rev().collect() @@ -332,7 +332,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> { { self.peers .iter() - .filter(|(_, info)| is_status(&info)) + .filter(|(_, info)| is_status(info)) .max_by_key(|(_, info)| info.score()) .map(|(id, _)| id) } @@ -453,29 +453,33 @@ impl<TSpec: EthSpec> PeerDB<TSpec> { self.connect(peer_id, multiaddr, enr, ConnectionDirection::Outgoing) } - /// Sets the peer as disconnected. A banned peer remains banned - pub fn notify_disconnect(&mut self, peer_id: &PeerId) { + /// Sets the peer as disconnected. A banned peer remains banned. If the node has become banned, + /// this returns true, otherwise this is false. + pub fn inject_disconnect(&mut self, peer_id: &PeerId) -> bool { // Note that it could be the case we prevent new nodes from joining. In this instance, // we don't bother tracking the new node.
if let Some(info) = self.peers.get_mut(peer_id) { - if let Some(became_banned) = info.notify_disconnect() { - if became_banned { - self.banned_peers_count - .add_banned_peer(info.seen_addresses()); - } else { - self.disconnected_peers += 1; - } + if !matches!( + info.connection_status(), + PeerConnectionStatus::Disconnected { .. } | PeerConnectionStatus::Banned { .. } + ) { + self.disconnected_peers += 1; } + let result = info.notify_disconnect().unwrap_or(false); self.shrink_to_fit(); + result + } else { + false } } - /// Notifies the peer manager that the peer is undergoing a normal disconnect (without banning - /// afterwards. - pub fn notify_disconnecting(&mut self, peer_id: &PeerId) { - if let Some(info) = self.peers.get_mut(peer_id) { - info.disconnecting(false); - } + /// Notifies the peer manager that the peer is undergoing a normal disconnect. Optionally tag + /// the peer to be banned after the disconnect. + pub fn notify_disconnecting(&mut self, peer_id: PeerId, to_ban_afterwards: bool) { + self.peers + .entry(peer_id) + .or_default() + .disconnecting(to_ban_afterwards); } /// Marks a peer to be disconnected and then banned. @@ -505,15 +509,17 @@ impl<TSpec: EthSpec> PeerDB<TSpec> { PeerConnectionStatus::Disconnected { .. } => { // It is possible to ban a peer that has a disconnected score, if there are many // events that score it poorly and are processed after it has disconnected. - debug!(log_ref, "Banning a disconnected peer"; "peer_id" => %peer_id); self.disconnected_peers = self.disconnected_peers.saturating_sub(1); info.ban(); self.banned_peers_count .add_banned_peer(info.seen_addresses()); + self.shrink_to_fit(); false } PeerConnectionStatus::Disconnecting { .. } => { - warn!(log_ref, "Banning peer that is currently disconnecting"; "peer_id" => %peer_id); + // NOTE: This can occur due to a rapid downscore of a peer. It goes through the + // disconnection phase and straight into banning in a short time-frame.
+ debug!(log_ref, "Banning peer that is currently disconnecting"; "peer_id" => %peer_id); info.disconnecting(true); false } @@ -532,6 +538,7 @@ impl PeerDB { self.banned_peers_count .add_banned_peer(info.seen_addresses()); info.ban(); + self.shrink_to_fit(); false } } @@ -726,7 +733,7 @@ mod tests { assert_eq!(pdb.disconnected_peers, 0); for p in pdb.connected_peer_ids().cloned().collect::>() { - pdb.notify_disconnect(&p); + pdb.inject_disconnect(&p); } assert_eq!(pdb.disconnected_peers, MAX_DC_PEERS); @@ -744,7 +751,8 @@ mod tests { for p in pdb.connected_peer_ids().cloned().collect::>() { pdb.disconnect_and_ban(&p); - pdb.notify_disconnect(&p); + pdb.inject_disconnect(&p); + pdb.disconnect_and_ban(&p); } assert_eq!(pdb.banned_peers_count.banned_peers(), MAX_BANNED_PEERS); @@ -804,23 +812,24 @@ mod tests { pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); pdb.connect_outgoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); pdb.disconnect_and_ban(&random_peer); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); + pdb.disconnect_and_ban(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); } @@ -835,6 +844,10 @@ mod tests { let random_peer1 = PeerId::random(); let random_peer2 = PeerId::random(); let random_peer3 = PeerId::random(); + println!("{}", random_peer); + println!("{}", random_peer1); + println!("{}", random_peer2); + println!("{}", random_peer3); pdb.connect_ingoing(&random_peer, multiaddr.clone(), None); pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None); @@ -846,10 +859,17 @@ mod tests { pdb.banned_peers().count() ); + println!("1:{}", pdb.disconnected_peers); + pdb.connect_ingoing(&random_peer, multiaddr.clone(), None); - pdb.notify_disconnect(&random_peer1); + pdb.inject_disconnect(&random_peer1); + println!("2:{}", pdb.disconnected_peers); + pdb.disconnect_and_ban(&random_peer2); + println!("3:{}", pdb.disconnected_peers); + pdb.inject_disconnect(&random_peer2); + println!("4:{}", pdb.disconnected_peers); pdb.disconnect_and_ban(&random_peer2); - pdb.notify_disconnect(&random_peer2); + println!("5:{}", pdb.disconnected_peers); pdb.connect_ingoing(&random_peer3, multiaddr.clone(), None); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( @@ -857,7 +877,16 @@ mod tests { pdb.banned_peers().count() ); pdb.disconnect_and_ban(&random_peer1); - pdb.notify_disconnect(&random_peer1); + println!("6:{}", pdb.disconnected_peers); + pdb.inject_disconnect(&random_peer1); + println!("7:{}", pdb.disconnected_peers); + pdb.disconnect_and_ban(&random_peer1); + println!("8:{}", pdb.disconnected_peers); + 
println!( + "{}, {:?}", + pdb.disconnected_peers, + pdb.disconnected_peers().collect::>() + ); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), @@ -871,7 +900,8 @@ mod tests { pdb.banned_peers().count() ); pdb.disconnect_and_ban(&random_peer3); - pdb.notify_disconnect(&random_peer3); + pdb.inject_disconnect(&random_peer3); + pdb.disconnect_and_ban(&random_peer3); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), @@ -879,32 +909,34 @@ mod tests { ); pdb.disconnect_and_ban(&random_peer3); - pdb.notify_disconnect(&random_peer3); + pdb.inject_disconnect(&random_peer3); + pdb.disconnect_and_ban(&random_peer3); pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None); - pdb.notify_disconnect(&random_peer2); + pdb.inject_disconnect(&random_peer2); + pdb.disconnect_and_ban(&random_peer3); + pdb.inject_disconnect(&random_peer3); pdb.disconnect_and_ban(&random_peer3); - pdb.notify_disconnect(&random_peer3); pdb.connect_ingoing(&random_peer, multiaddr, None); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), pdb.banned_peers().count() ); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), pdb.banned_peers().count() ); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), pdb.banned_peers().count() ); pdb.disconnect_and_ban(&random_peer); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); } @@ -950,7 +982,8 @@ mod tests { for p in &peers[..BANNED_PEERS_PER_IP_THRESHOLD + 1] { pdb.disconnect_and_ban(p); - pdb.notify_disconnect(p); + pdb.inject_disconnect(p); + pdb.disconnect_and_ban(p); } //check that ip1 and ip2 are banned but ip3-5 not @@ -962,7 +995,8 @@ mod tests { //ban also the last peer in peers pdb.disconnect_and_ban(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); - pdb.notify_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); + pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); + pdb.disconnect_and_ban(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); //check that ip1-ip4 are banned but ip5 not assert!(pdb.is_banned(&p1)); @@ -1012,7 +1046,8 @@ mod tests { // ban all peers for p in &peers { pdb.disconnect_and_ban(p); - pdb.notify_disconnect(p); + pdb.inject_disconnect(p); + pdb.disconnect_and_ban(p); } // check ip is banned @@ -1031,9 +1066,10 @@ mod tests { let mut socker_addr = Multiaddr::from(ip2); socker_addr.push(Protocol::Tcp(8080)); for p in &peers { - pdb.connect_ingoing(&p, socker_addr.clone(), None); + pdb.connect_ingoing(p, socker_addr.clone(), None); + pdb.disconnect_and_ban(p); + pdb.inject_disconnect(p); pdb.disconnect_and_ban(p); - pdb.notify_disconnect(p); } // both IP's are now banned @@ -1042,14 +1078,15 @@ mod tests { // unban all peers for p in &peers { - reset_score(&mut pdb, &p); + reset_score(&mut pdb, p); pdb.unban(p).unwrap(); } // reban every peer except one for p in &peers[1..] 
{ pdb.disconnect_and_ban(p); - pdb.notify_disconnect(p); + pdb.inject_disconnect(p); + pdb.disconnect_and_ban(p); } // nothing is banned @@ -1058,7 +1095,8 @@ mod tests { //reban last peer pdb.disconnect_and_ban(&peers[0]); - pdb.notify_disconnect(&peers[0]); + pdb.inject_disconnect(&peers[0]); + pdb.disconnect_and_ban(&peers[0]); //Ip's are banned again assert!(pdb.is_banned(&p1)); diff --git a/beacon_node/eth2_libp2p/src/peer_manager/score.rs b/beacon_node/eth2_libp2p/src/peer_manager/score.rs index 02479bef067..8b20192296d 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/score.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/score.rs @@ -5,7 +5,7 @@ //! As the logic develops this documentation will advance. //! //! The scoring algorithms are currently experimental. -use crate::behaviour::GOSSIPSUB_GREYLIST_THRESHOLD; +use crate::behaviour::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; use serde::Serialize; use std::time::Instant; use strum::AsRefStr; @@ -31,7 +31,7 @@ const MIN_SCORE: f64 = -100.0; /// The halflife of a peer's score. I.e the number of seconds it takes for the score to decay to half its value. const SCORE_HALFLIFE: f64 = 600.0; /// The number of seconds we ban a peer for before their score begins to decay. -const BANNED_BEFORE_DECAY: Duration = Duration::from_secs(1800); +const BANNED_BEFORE_DECAY: Duration = Duration::from_secs(12 * 3600); // 12 hours /// We weight negative gossipsub scores in such a way that they never result in a disconnect by /// themselves. This "solves" the problem of non-decaying gossipsub scores for disconnected peers. diff --git a/beacon_node/eth2_libp2p/src/rpc/handler.rs b/beacon_node/eth2_libp2p/src/rpc/handler.rs index 55b40182264..554e6787f83 100644 --- a/beacon_node/eth2_libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2_libp2p/src/rpc/handler.rs @@ -1,8 +1,10 @@ #![allow(clippy::type_complexity)] #![allow(clippy::cognitive_complexity)] -use super::methods::{RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination}; -use super::protocol::{Protocol, RPCError, RPCProtocol}; +use super::methods::{ + GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination, +}; +use super::protocol::{InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; @@ -221,13 +223,14 @@ where } } - /// Initiates the handler's shutdown process, sending an optional last message to the peer. - pub fn shutdown(&mut self, final_msg: Option<(RequestId, OutboundRequest)>) { + /// Initiates the handler's shutdown process, sending an optional Goodbye message to the + /// peer. + fn shutdown(&mut self, goodbye_reason: Option<GoodbyeReason>) { if matches!(self.state, HandlerState::Active) { if !self.dial_queue.is_empty() { debug!(self.log, "Starting handler shutdown"; "unsent_queued_requests" => self.dial_queue.len()); } - // we now drive to completion communications already dialed/established + // We now drive to completion communications already dialed/established while let Some((id, req)) = self.dial_queue.pop() { self.events_out.push(Err(HandlerErr::Outbound { error: RPCError::HandlerRejected, id, proto: req.protocol(), })); } - // Queue our final message, if any - if let Some((id, req)) = final_msg { - self.dial_queue.push((id, req)); + // Queue our goodbye message.
+ if let Some(reason) = goodbye_reason { + self.dial_queue + .push((RequestId::Router, OutboundRequest::Goodbye(reason))); + } self.state = HandlerState::ShuttingDown(Box::new(sleep_until( @@ -345,6 +349,11 @@ where ); } + // If we received a goodbye, shutdown the connection. + if let InboundRequest::Goodbye(_) = req { + self.shutdown(None); + } + self.events_out.push(Ok(RPCReceived::Request( self.current_inbound_substream_id, req, @@ -412,6 +421,7 @@ where match rpc_event { RPCSend::Request(id, req) => self.send_request(id, req), RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response), + RPCSend::Shutdown(reason) => self.shutdown(Some(reason)), } } @@ -512,6 +522,9 @@ where if delay.is_elapsed() { self.state = HandlerState::Deactivated; debug!(self.log, "Handler deactivated"); + return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError( + "Shutdown timeout", + ))); } } @@ -864,6 +877,19 @@ where protocol: SubstreamProtocol::new(req.clone(), ()).map_info(|()| (id, req)), }); } + + // Check if we have completed sending a goodbye; if so, disconnect. + if let HandlerState::ShuttingDown(_) = self.state { + if self.dial_queue.is_empty() + && self.outbound_substreams.is_empty() + && self.inbound_substreams.is_empty() + && self.events_out.is_empty() + && self.dial_negotiated == 0 + { + return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::Disconnected)); + } + } + Poll::Pending } } diff --git a/beacon_node/eth2_libp2p/src/rpc/methods.rs b/beacon_node/eth2_libp2p/src/rpc/methods.rs index 8facac48af9..e24b6e980b2 100644 --- a/beacon_node/eth2_libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2_libp2p/src/rpc/methods.rs @@ -149,9 +149,9 @@ impl From<u64> for GoodbyeReason { } } -impl Into<u64> for GoodbyeReason { - fn into(self) -> u64 { - self as u64 +impl From<GoodbyeReason> for u64 { + fn from(reason: GoodbyeReason) -> u64 { + reason as u64 } } diff --git a/beacon_node/eth2_libp2p/src/rpc/mod.rs b/beacon_node/eth2_libp2p/src/rpc/mod.rs index b91ca71fa34..702e3e20dfe 100644 --- a/beacon_node/eth2_libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2_libp2p/src/rpc/mod.rs @@ -52,6 +52,8 @@ pub enum RPCSend<TSpec: EthSpec> { /// peer. The second parameter is a single chunk of a response. These go over *inbound* /// connections. Response(SubstreamId, RPCCodedResponse<TSpec>), + /// Lighthouse has requested to terminate the connection with a goodbye message. + Shutdown(GoodbyeReason), } /// RPC events received from outside Lighthouse. @@ -77,6 +79,7 @@ impl<TSpec: EthSpec> std::fmt::Display for RPCSend<TSpec> { match self { RPCSend::Request(id, req) => write!(f, "RPC Request(id: {:?}, {})", id, req), RPCSend::Response(id, res) => write!(f, "RPC Response(id: {:?}, {})", id, res), + RPCSend::Shutdown(reason) => write!(f, "Sending Goodbye: {}", reason), } } } @@ -115,11 +118,7 @@ impl<TSpec: EthSpec> RPC<TSpec> { methods::MAX_REQUEST_BLOCKS, Duration::from_secs(10), ) - .n_every( - Protocol::BlocksByRoot, - methods::MAX_REQUEST_BLOCKS, - Duration::from_secs(10), - ) + .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) .build() .expect("Configuration parameters are valid"); RPC { @@ -160,6 +159,16 @@ impl<TSpec: EthSpec> RPC<TSpec> { event: RPCSend::Request(request_id, event), }); } + + /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This + /// gracefully terminates the RPC behaviour with a goodbye message.
+ pub fn shutdown(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Shutdown(reason), + }); + } } impl NetworkBehaviour for RPC diff --git a/beacon_node/eth2_libp2p/src/rpc/protocol.rs b/beacon_node/eth2_libp2p/src/rpc/protocol.rs index 44e180fb598..031246ba16c 100644 --- a/beacon_node/eth2_libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2_libp2p/src/rpc/protocol.rs @@ -452,6 +452,8 @@ pub enum RPCError { NegotiationTimeout, /// Handler rejected this request. HandlerRejected, + /// We have intentionally disconnected. + Disconnected, } impl From for RPCError { @@ -490,6 +492,7 @@ impl std::fmt::Display for RPCError { RPCError::InternalError(ref err) => write!(f, "Internal error: {}", err), RPCError::NegotiationTimeout => write!(f, "Negotiation timeout"), RPCError::HandlerRejected => write!(f, "Handler rejected the request"), + RPCError::Disconnected => write!(f, "Gracefully Disconnected"), } } } @@ -508,6 +511,7 @@ impl std::error::Error for RPCError { RPCError::ErrorResponse(_, _) => None, RPCError::NegotiationTimeout => None, RPCError::HandlerRejected => None, + RPCError::Disconnected => None, } } } diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index 21c75836eb1..f19e6ffe6cc 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -27,6 +27,8 @@ use std::sync::Arc; use std::time::Duration; use types::{ChainSpec, EnrForkId, EthSpec}; +use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR}; + pub const NETWORK_KEY_FILENAME: &str = "key"; /// The maximum simultaneous libp2p connections per peer. const MAX_CONNECTIONS_PER_PEER: u32 = 1; @@ -129,8 +131,17 @@ impl Service { let limits = ConnectionLimits::default() .with_max_pending_incoming(Some(5)) .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some((config.target_peers as f64 * 1.2) as u32)) - .with_max_established_outgoing(Some((config.target_peers as f64 * 1.2) as u32)) + .with_max_established_incoming(Some( + (config.target_peers as f32 + * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + as u32, + )) + .with_max_established_outgoing(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)) as u32, + )) + .with_max_established_total(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)) as u32, + )) .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); ( @@ -221,7 +232,7 @@ impl Service { let mut subscribed_topics: Vec = vec![]; for topic_kind in &config.topics { - if swarm.subscribe_kind(topic_kind.clone()) { + if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { subscribed_topics.push(topic_kind.clone()); } else { warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); @@ -244,7 +255,9 @@ impl Service { /// Sends a request to a peer, with a given Id. pub fn send_request(&mut self, peer_id: PeerId, request_id: RequestId, request: Request) { - self.swarm.send_request(peer_id, request_id, request); + self.swarm + .behaviour_mut() + .send_request(peer_id, request_id, request); } /// Informs the peer that their request failed. @@ -255,42 +268,80 @@ impl Service { error: RPCResponseErrorCode, reason: String, ) { - self.swarm._send_error_reponse(peer_id, id, error, reason); + self.swarm + .behaviour_mut() + ._send_error_reponse(peer_id, id, error, reason); } /// Report a peer's action. 
pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) { - self.swarm.report_peer(peer_id, action, source); + self.swarm + .behaviour_mut() + .peer_manager_mut() + .report_peer(peer_id, action, source); } /// Disconnect and ban a peer, providing a reason. pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - self.swarm.goodbye_peer(peer_id, reason, source); + self.swarm + .behaviour_mut() + .goodbye_peer(peer_id, reason, source); } /// Sends a response to a peer's request. pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { - self.swarm.send_successful_response(peer_id, id, response); + self.swarm + .behaviour_mut() + .send_successful_response(peer_id, id, response); } pub async fn next_event(&mut self) -> Libp2pEvent { loop { - match self.swarm.next_event().await { - SwarmEvent::Behaviour(behaviour) => return Libp2pEvent::Behaviour(behaviour), - SwarmEvent::ConnectionEstablished { .. } => { - // A connection could be established with a banned peer. This is - // handled inside the behaviour. + match self.swarm.select_next_some().await { + SwarmEvent::Behaviour(behaviour) => { + // Handle banning here + match &behaviour { + BehaviourEvent::PeerBanned(peer_id) => { + self.swarm.ban_peer_id(*peer_id); + } + BehaviourEvent::PeerUnbanned(peer_id) => { + self.swarm.unban_peer_id(*peer_id); + } + _ => {} + } + return Libp2pEvent::Behaviour(behaviour); + } + SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + } => { + // Inform the peer manager. + // We require the ENR to inject into the peer db, if it exists. + let enr = self + .swarm + .behaviour_mut() + .discovery_mut() + .enr_of_peer(&peer_id); + self.swarm + .behaviour_mut() + .peer_manager_mut() + .inject_connection_established(peer_id, endpoint, num_established, enr); } SwarmEvent::ConnectionClosed { peer_id, - cause, - endpoint: _, + cause: _, + endpoint, num_established, } => { - trace!(self.log, "Connection closed"; "peer_id" => %peer_id, "cause" => ?cause, "connections" => num_established); + // Inform the peer manager. + self.swarm + .behaviour_mut() + .peer_manager_mut() + .inject_connection_closed(peer_id, endpoint, num_established); } - SwarmEvent::NewListenAddr(multiaddr) => { - return Libp2pEvent::NewListenAddr(multiaddr) + SwarmEvent::NewListenAddr { address, .. } => { + return Libp2pEvent::NewListenAddr(address) } SwarmEvent::IncomingConnection { local_addr, @@ -303,10 +354,10 @@ impl Service { send_back_addr, error, } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error) + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); } - SwarmEvent::BannedPeer { .. } => { - // We do not ban peers at the swarm layer, so this should never occur. + SwarmEvent::BannedPeer { peer_id, .. 
} => { + debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); } SwarmEvent::UnreachableAddr { peer_id, @@ -315,20 +366,26 @@ impl Service { attempts_remaining, } => { debug!(self.log, "Failed to dial address"; "peer_id" => %peer_id, "address" => %address, "error" => %error, "attempts_remaining" => attempts_remaining); + self.swarm + .behaviour_mut() + .peer_manager_mut() + .inject_dial_failure(&peer_id); } SwarmEvent::UnknownPeerUnreachableAddr { address, error } => { debug!(self.log, "Peer not known at dialed address"; "address" => %address, "error" => %error); } - SwarmEvent::ExpiredListenAddr(multiaddr) => { - debug!(self.log, "Listen address expired"; "multiaddr" => %multiaddr) + SwarmEvent::ExpiredListenAddr { address, .. } => { + debug!(self.log, "Listen address expired"; "address" => %address) } - SwarmEvent::ListenerClosed { addresses, reason } => { + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); if Swarm::listeners(&self.swarm).count() == 0 { return Libp2pEvent::ZeroListeners; } } - SwarmEvent::ListenerError { error } => { + SwarmEvent::ListenerError { error, .. } => { // this is non fatal, but we still check warn!(self.log, "Listener error"; "error" => ?error); if Swarm::listeners(&self.swarm).count() == 0 { @@ -336,7 +393,16 @@ impl Service { } } SwarmEvent::Dialing(peer_id) => { - debug!(self.log, "Dialing peer"; "peer_id" => %peer_id); + // We require the ENR to inject into the peer db, if it exists. + let enr = self + .swarm + .behaviour_mut() + .discovery_mut() + .enr_of_peer(&peer_id); + self.swarm + .behaviour_mut() + .peer_manager_mut() + .inject_dialing(&peer_id, enr); } } } @@ -350,8 +416,8 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; fn build_transport( local_private_key: Keypair, ) -> std::io::Result<(BoxedTransport, Arc)> { - let transport = libp2p::tcp::TokioTcpConfig::new().nodelay(true); - let transport = libp2p::dns::DnsConfig::new(transport)?; + let tcp = libp2p::tcp::TokioTcpConfig::new().nodelay(true); + let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; #[cfg(feature = "libp2p-websocket")] let transport = { let trans_clone = transport.clone(); @@ -365,13 +431,17 @@ fn build_transport( mplex_config.set_max_buffer_size(256); mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); + // yamux config + let mut yamux_config = libp2p::yamux::YamuxConfig::default(); + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + // Authentication Ok(( transport .upgrade(core::upgrade::Version::V1) .authenticate(generate_noise_config(&local_private_key)) .multiplex(core::upgrade::SelectUpgrade::new( - libp2p::yamux::YamuxConfig::default(), + yamux_config, mplex_config, )) .timeout(Duration::from_secs(10)) @@ -508,6 +578,6 @@ fn load_or_build_metadata( }; debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number); - save_metadata_to_disk(network_dir, meta_data.clone(), &log); + save_metadata_to_disk(network_dir, meta_data.clone(), log); meta_data } diff --git a/beacon_node/eth2_libp2p/src/types/topics.rs b/beacon_node/eth2_libp2p/src/types/topics.rs index f8e2b676888..6bacfcf383c 100644 --- a/beacon_node/eth2_libp2p/src/types/topics.rs +++ b/beacon_node/eth2_libp2p/src/types/topics.rs @@ -144,19 +144,19 @@ impl GossipTopic { } } -impl Into for GossipTopic { - fn into(self) -> Topic { - Topic::new(self) +impl From for Topic { + fn from(topic: GossipTopic) -> Topic { + 
Topic::new(topic) } } -impl Into for GossipTopic { - fn into(self) -> String { - let encoding = match self.encoding { +impl From for String { + fn from(topic: GossipTopic) -> String { + let encoding = match topic.encoding { GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX, }; - let kind = match self.kind { + let kind = match topic.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), @@ -167,7 +167,7 @@ impl Into for GossipTopic { format!( "/{}/{}/{}/{}", TOPIC_PREFIX, - hex::encode(self.fork_digest), + hex::encode(topic.fork_digest), kind, encoding ) diff --git a/beacon_node/eth2_libp2p/tests/common/mod.rs b/beacon_node/eth2_libp2p/tests/common/mod.rs index a09f800d076..1f60624287d 100644 --- a/beacon_node/eth2_libp2p/tests/common/mod.rs +++ b/beacon_node/eth2_libp2p/tests/common/mod.rs @@ -126,7 +126,7 @@ pub async fn build_libp2p_instance( #[allow(dead_code)] pub fn get_enr(node: &LibP2PService) -> Enr { - node.swarm.local_enr() + node.swarm.behaviour().local_enr() } // Returns `n` libp2p peers in fully connected topology. @@ -142,7 +142,7 @@ pub async fn build_full_mesh( } let multiaddrs: Vec = nodes .iter() - .map(|x| get_enr(&x).multiaddr()[1].clone()) + .map(|x| get_enr(x).multiaddr()[1].clone()) .collect(); for (i, node) in nodes.iter_mut().enumerate().take(n) { @@ -171,7 +171,7 @@ pub async fn build_node_pair( let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log).await; let mut receiver = build_libp2p_instance(rt, vec![], receiver_log).await; - let receiver_multiaddr = receiver.swarm.local_enr().multiaddr()[1].clone(); + let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone(); // let the two nodes set up listeners let sender_fut = async { @@ -216,7 +216,7 @@ pub async fn build_linear(rt: Weak, log: slog::Logger, n: usize) -> Vec let multiaddrs: Vec = nodes .iter() - .map(|x| get_enr(&x).multiaddr()[1].clone()) + .map(|x| get_enr(x).multiaddr()[1].clone()) .collect(); for i in 0..n - 1 { match libp2p::Swarm::dial_addr(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) { diff --git a/beacon_node/eth2_libp2p/tests/rpc_tests.rs b/beacon_node/eth2_libp2p/tests/rpc_tests.rs index 1b565a4655e..d621bf31cc8 100644 --- a/beacon_node/eth2_libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2_libp2p/tests/rpc_tests.rs @@ -53,10 +53,10 @@ fn test_status_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -90,7 +90,7 @@ fn test_status_rpc() { if request == rpc_request { // send the response debug!(log, "Receiver Received"); - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, rpc_response.clone(), @@ -149,10 +149,10 @@ fn test_blocks_by_range_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( 
peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -197,14 +197,14 @@ fn test_blocks_by_range_chunked_rpc() { // send the response warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, rpc_response.clone(), ); } // send the stream termination - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, Response::BlocksByRange(None), @@ -263,10 +263,10 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -335,7 +335,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( *peer_id, *stream_id, rpc_response.clone(), @@ -395,10 +395,10 @@ fn test_blocks_by_range_single_empty_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -441,14 +441,14 @@ fn test_blocks_by_range_single_empty_rpc() { warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, rpc_response.clone(), ); } // send the stream termination - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, Response::BlocksByRange(None), @@ -510,10 +510,10 @@ fn test_blocks_by_root_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -556,7 +556,7 @@ fn test_blocks_by_root_chunked_rpc() { debug!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, rpc_response.clone(), @@ -564,7 +564,7 @@ fn test_blocks_by_root_chunked_rpc() { debug!(log, "Sending message"); } // send the stream termination - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, Response::BlocksByRange(None), @@ -631,10 +631,10 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message 
                    debug!(log, "Sending RPC");
-                    sender.swarm.send_request(
+                    sender.swarm.behaviour_mut().send_request(
                        peer_id,
                        RequestId::Sync(10),
                        rpc_request.clone(),
@@ -703,7 +703,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
                    if message_info.is_some() {
                        messages_sent += 1;
                        let (peer_id, stream_id) = message_info.as_ref().unwrap();
-                        receiver.swarm.send_successful_response(
+                        receiver.swarm.behaviour_mut().send_successful_response(
                            *peer_id,
                            *stream_id,
                            rpc_response.clone(),
@@ -746,10 +746,10 @@ fn test_goodbye_rpc() {
     let sender_future = async {
         loop {
             match sender.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
+                Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
                    // Send a goodbye and disconnect
                    debug!(log, "Sending RPC");
-                    sender.swarm.goodbye_peer(
+                    sender.swarm.behaviour_mut().goodbye_peer(
                        &peer_id,
                        GoodbyeReason::IrrelevantNetwork,
                        ReportSource::SyncService,
diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml
index 04706f72d97..bf19189f97d 100644
--- a/beacon_node/genesis/Cargo.toml
+++ b/beacon_node/genesis/Cargo.toml
@@ -20,7 +20,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
 eth2_ssz = "0.1.2"
 eth2_hashing = "0.1.0"
 tree_hash = "0.1.1"
-tokio = { version = "1.1.0", features = ["full"] }
+tokio = { version = "1.7.1", features = ["full"] }
 parking_lot = "0.11.0"
 slog = "2.5.2"
 exit-future = "0.2.0"
diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs
index d5ef6ad0d90..8a5bbd0b16b 100644
--- a/beacon_node/genesis/src/eth1_genesis_service.rs
+++ b/beacon_node/genesis/src/eth1_genesis_service.rs
@@ -316,7 +316,7 @@ impl Eth1GenesisService {
         //
         // Note: this state is fully valid, some fields have been bypassed to make verification
         // faster.
-        let state = self.cheap_state_at_eth1_block::<E>(block, &spec)?;
+        let state = self.cheap_state_at_eth1_block::<E>(block, spec)?;
         let active_validator_count = state
             .get_active_validator_indices(E::genesis_epoch(), spec)
             .map_err(|e| format!("Genesis validators error: {:?}", e))?
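Many of the single-character changes in this hunk and the next (and in the HTTP API files further down) simply drop a `&` from arguments that are already references; this is Clippy's `needless_borrow` lint. A minimal, self-contained sketch of the pattern, with illustrative names that are not from the diff:

```rust
// `spec` is already a reference here; writing `&spec` at the call site
// would create a `&&u64` that the compiler must auto-deref again.
// Clippy's `needless_borrow` lint flags that extra borrow, which is what
// most of the `&spec` -> `spec` changes in this patch clean up.
fn validator_count(spec: &u64) -> u64 {
    *spec
}

fn main() {
    let spec: &u64 = &16_384;
    // Before: validator_count(&spec) -- a needless `&&u64` borrow.
    // After:  pass the existing reference straight through.
    assert_eq!(validator_count(spec), 16_384);
}
```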
@@ -328,7 +328,7 @@ impl Eth1GenesisService { if is_valid_genesis_state(&state, spec) { let genesis_state = self - .genesis_from_eth1_block(block.clone(), &spec) + .genesis_from_eth1_block(block.clone(), spec) .map_err(|e| format!("Failed to generate valid genesis state : {}", e))?; return Ok(Some(genesis_state)); @@ -372,12 +372,12 @@ impl Eth1GenesisService { let genesis_state = initialize_beacon_state_from_eth1( eth1_block.hash, eth1_block.timestamp, - genesis_deposits(deposit_logs, &spec)?, - &spec, + genesis_deposits(deposit_logs, spec)?, + spec, ) .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; - if is_valid_genesis_state(&genesis_state, &spec) { + if is_valid_genesis_state(&genesis_state, spec) { Ok(genesis_state) } else { Err("Generated state was not valid.".to_string()) @@ -406,7 +406,7 @@ impl Eth1GenesisService { deposit_root: Hash256::zero(), deposit_count: 0, }, - &spec, + spec, ); self.deposit_logs_at_block(eth1_block.number) diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index ddea773f7ac..0f288cfea0c 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] warp = { git = "https://github.com/paulhauner/warp ", branch = "cors-wildcard" } serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "1.1.0", features = ["macros","sync"] } +tokio = { version = "1.7.1", features = ["macros","sync"] } tokio-stream = { version = "0.1.3", features = ["sync"] } tokio-util = "0.6.3" parking_lot = "0.11.0" @@ -34,5 +34,4 @@ futures = "0.3.8" store = { path = "../store" } environment = { path = "../../lighthouse/environment" } tree_hash = "0.1.1" -discv5 = { version = "0.1.0-beta.5", features = ["libp2p"] } sensitive_url = { path = "../../common/sensitive_url" } diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 20f254889a2..9207067e33d 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -63,7 +63,7 @@ fn cached_attestation_duties( .map_err(warp_utils::reject::beacon_chain_error)?; let (duties, dependent_root) = chain - .validator_attestation_duties(&request_indices, request_epoch, head.block_root) + .validator_attestation_duties(request_indices, request_epoch, head.block_root) .map_err(warp_utils::reject::beacon_chain_error)?; convert_to_api_response(duties, request_indices, dependent_root, chain) @@ -104,7 +104,7 @@ fn compute_historic_attester_duties( )?; state } else { - StateId::slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())).state(&chain)? + StateId::slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? }; // Sanity-check the state lookup. 
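The duty endpoints above resolve an epoch to its first slot via `epoch.start_slot(...)`, and the validator-inclusion changes below resolve an epoch to its last slot via `epoch.end_slot(...)`. A plain-integer sketch of that arithmetic (the real code goes through the `Epoch`/`Slot` newtypes in the `types` crate):

```rust
// Plain-integer stand-ins for `Epoch::start_slot` / `Epoch::end_slot`.
fn start_slot(epoch: u64, slots_per_epoch: u64) -> u64 {
    epoch * slots_per_epoch
}

fn end_slot(epoch: u64, slots_per_epoch: u64) -> u64 {
    // The last slot that still belongs to `epoch`.
    (epoch + 1) * slots_per_epoch - 1
}

fn main() {
    // Mainnet uses 32 slots per epoch.
    assert_eq!(start_slot(5, 32), 160);
    assert_eq!(end_slot(5, 32), 191);
}
```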
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 31b2e47c98e..ce2ee855431 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -939,7 +939,6 @@ pub fn serve( blocking_json_task(move || { block_id .block(&chain) - // FIXME(altair): could avoid clone with by-value accessor .map(|block| block.message().body().attestations().clone()) .map(api_types::GenericResponse::from) }) @@ -1516,11 +1515,9 @@ pub fn serve( peer_id: peer_id.to_string(), enr: peer_info.enr.as_ref().map(|enr| enr.to_base64()), last_seen_p2p_address: address, - direction: api_types::PeerDirection::from_connection_direction( - &dir, - ), + direction: api_types::PeerDirection::from_connection_direction(dir), state: api_types::PeerState::from_peer_connection_status( - &peer_info.connection_status(), + peer_info.connection_status(), ), })); } @@ -1564,9 +1561,9 @@ pub fn serve( // the eth2 API spec implies only peers we have been connected to at some point should be included. if let Some(dir) = peer_info.connection_direction.as_ref() { let direction = - api_types::PeerDirection::from_connection_direction(&dir); + api_types::PeerDirection::from_connection_direction(dir); let state = api_types::PeerState::from_peer_connection_status( - &peer_info.connection_status(), + peer_info.connection_status(), ); let state_matches = query.state.as_ref().map_or(true, |states| { @@ -1617,7 +1614,7 @@ pub fn serve( .peers() .for_each(|(_, peer_info)| { let state = api_types::PeerState::from_peer_connection_status( - &peer_info.connection_status(), + peer_info.connection_status(), ); match state { api_types::PeerState::Connected => connected += 1, @@ -1910,6 +1907,49 @@ pub fn serve( }, ); + // POST lighthouse/liveness + let post_lighthouse_liveness = warp::path("lighthouse") + .and(warp::path("liveness")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |request_data: api_types::LivenessRequestData, chain: Arc>| { + blocking_json_task(move || { + // Ensure the request is for either the current, previous or next epoch. 
+ let current_epoch = chain + .epoch() + .map_err(warp_utils::reject::beacon_chain_error)?; + let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); + let next_epoch = current_epoch.saturating_add(Epoch::new(1)); + + if request_data.epoch < prev_epoch || request_data.epoch > next_epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch from the current epoch {}", + request_data.epoch, current_epoch + ))); + } + + let liveness: Vec = request_data + .indices + .iter() + .cloned() + .map(|index| { + let is_live = + chain.validator_seen_at_epoch(index as usize, request_data.epoch); + api_types::LivenessResponseData { + index: index as u64, + epoch: request_data.epoch, + is_live, + } + }) + .collect(); + + Ok(api_types::GenericResponse::from(liveness)) + }) + }, + ); + // GET lighthouse/health let get_lighthouse_health = warp::path("lighthouse") .and(warp::path("health")) @@ -2252,6 +2292,7 @@ pub fn serve( .or(post_beacon_pool_voluntary_exits.boxed()) .or(post_validator_duties_attester.boxed()) .or(post_validator_aggregate_and_proofs.boxed()) + .or(post_lighthouse_liveness.boxed()) .or(post_validator_beacon_committee_subscriptions.boxed()), )) .recover(warp_utils::reject::handle_rejection) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 69c9f738a76..16670b507df 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -185,7 +185,7 @@ fn compute_historic_proposer_duties( ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec)?; state } else { - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(&chain)? + StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? }; // Ensure the state lookup was correct. diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index cdd2a51621b..9131d698fc3 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -4,35 +4,59 @@ use eth2::{ lighthouse::{GlobalValidatorInclusionData, ValidatorInclusionData}, types::ValidatorId, }; -use state_processing::per_epoch_processing::ValidatorStatuses; -use types::{Epoch, EthSpec}; +use state_processing::per_epoch_processing::{ + altair::participation_cache::Error as ParticipationCacheError, process_epoch, + EpochProcessingSummary, +}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec}; -/// Returns information about *all validators* (i.e., global) and how they performed during a given -/// epoch. -pub fn global_validator_inclusion_data( +/// Returns the state in the last slot of `epoch`. +fn end_of_epoch_state( epoch: Epoch, chain: &BeaconChain, -) -> Result { +) -> Result, warp::reject::Rejection> { let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); + StateId::slot(target_slot).state(chain) +} - let state = StateId::slot(target_slot).state(chain)?; +/// Generate an `EpochProcessingSummary` for `state`. +/// +/// ## Notes +/// +/// Will mutate `state`, transitioning it to the next epoch. 
+fn get_epoch_processing_summary( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result { + process_epoch(state, spec) + .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e))) +} - let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec) - .map_err(warp_utils::reject::beacon_state_error)?; - validator_statuses - .process_attestations(&state) - .map_err(warp_utils::reject::beacon_state_error)?; +fn convert_cache_error(error: ParticipationCacheError) -> warp::reject::Rejection { + warp_utils::reject::custom_server_error(format!("{:?}", error)) +} - let totals = validator_statuses.total_balances; +/// Returns information about *all validators* (i.e., global) and how they performed during a given +/// epoch. +pub fn global_validator_inclusion_data( + epoch: Epoch, + chain: &BeaconChain, +) -> Result { + let mut state = end_of_epoch_state(epoch, chain)?; + let summary = get_epoch_processing_summary(&mut state, &chain.spec)?; Ok(GlobalValidatorInclusionData { - current_epoch_active_gwei: totals.current_epoch(), - previous_epoch_active_gwei: totals.previous_epoch(), - current_epoch_attesting_gwei: totals.current_epoch_attesters(), - current_epoch_target_attesting_gwei: totals.current_epoch_target_attesters(), - previous_epoch_attesting_gwei: totals.previous_epoch_attesters(), - previous_epoch_target_attesting_gwei: totals.previous_epoch_target_attesters(), - previous_epoch_head_attesting_gwei: totals.previous_epoch_head_attesters(), + current_epoch_active_gwei: summary.current_epoch_total_active_balance(), + previous_epoch_active_gwei: summary.previous_epoch_total_active_balance(), + current_epoch_target_attesting_gwei: summary + .current_epoch_target_attesting_balance() + .map_err(convert_cache_error)?, + previous_epoch_target_attesting_gwei: summary + .previous_epoch_target_attesting_balance() + .map_err(convert_cache_error)?, + previous_epoch_head_attesting_gwei: summary + .previous_epoch_head_attesting_balance() + .map_err(convert_cache_error)?, }) } @@ -42,15 +66,7 @@ pub fn validator_inclusion_data( validator_id: &ValidatorId, chain: &BeaconChain, ) -> Result, warp::Rejection> { - let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); - - let mut state = StateId::slot(target_slot).state(chain)?; - - let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec) - .map_err(warp_utils::reject::beacon_state_error)?; - validator_statuses - .process_attestations(&state) - .map_err(warp_utils::reject::beacon_state_error)?; + let mut state = end_of_epoch_state(epoch, chain)?; state .update_pubkey_cache() @@ -70,19 +86,31 @@ pub fn validator_inclusion_data( } }; - Ok(validator_statuses - .statuses - .get(validator_index) - .map(|vote| ValidatorInclusionData { - is_slashed: vote.is_slashed, - is_withdrawable_in_current_epoch: vote.is_withdrawable_in_current_epoch, - is_active_in_current_epoch: vote.is_active_in_current_epoch, - is_active_in_previous_epoch: vote.is_active_in_previous_epoch, - current_epoch_effective_balance_gwei: vote.current_epoch_effective_balance, - is_current_epoch_attester: vote.is_current_epoch_attester, - is_current_epoch_target_attester: vote.is_current_epoch_target_attester, - is_previous_epoch_attester: vote.is_previous_epoch_attester, - is_previous_epoch_target_attester: vote.is_previous_epoch_target_attester, - is_previous_epoch_head_attester: vote.is_previous_epoch_head_attester, - })) + // Obtain the validator *before* transitioning the state into the next epoch. 
+ let validator = if let Ok(validator) = state.get_validator(validator_index) { + validator.clone() + } else { + return Ok(None); + }; + + let summary = get_epoch_processing_summary(&mut state, &chain.spec)?; + + Ok(Some(ValidatorInclusionData { + is_slashed: validator.slashed, + is_withdrawable_in_current_epoch: validator.is_withdrawable_at(epoch), + is_active_unslashed_in_current_epoch: summary + .is_active_unslashed_in_current_epoch(validator_index), + is_active_unslashed_in_previous_epoch: summary + .is_active_unslashed_in_previous_epoch(validator_index), + current_epoch_effective_balance_gwei: validator.effective_balance, + is_current_epoch_target_attester: summary + .is_current_epoch_target_attester(validator_index) + .map_err(convert_cache_error)?, + is_previous_epoch_target_attester: summary + .is_previous_epoch_target_attester(validator_index) + .map_err(convert_cache_error)?, + is_previous_epoch_head_attester: summary + .is_previous_epoch_head_attester(validator_index) + .map_err(convert_cache_error)?, + })) } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index b82dd25fad0..470afbf0923 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -5,11 +5,11 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; -use discv5::enr::{CombinedKey, EnrBuilder}; use environment::null_logger; use eth2::Error; use eth2::StatusCode; use eth2::{types::*, BeaconNodeHttpClient, Timeouts}; +use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder}; use eth2_libp2p::{ rpc::methods::MetaData, types::{EnrBitfield, SyncState}, @@ -2149,6 +2149,71 @@ impl ApiTester { self } + pub async fn test_post_lighthouse_liveness(self) -> Self { + let epoch = self.chain.epoch().unwrap(); + let head_state = self.chain.head_beacon_state().unwrap(); + let indices = (0..head_state.validators().len()) + .map(|i| i as u64) + .collect::>(); + + // Construct the expected response + let expected: Vec = head_state + .validators() + .iter() + .enumerate() + .map(|(index, _)| LivenessResponseData { + index: index as u64, + is_live: false, + epoch, + }) + .collect(); + + let result = self + .client + .post_lighthouse_liveness(indices.as_slice(), epoch) + .await + .unwrap() + .data; + + assert_eq!(result, expected); + + // Attest to the current slot + self.client + .post_beacon_pool_attestations(self.attestations.as_slice()) + .await + .unwrap(); + + let result = self + .client + .post_lighthouse_liveness(indices.as_slice(), epoch) + .await + .unwrap() + .data; + + let committees = head_state + .get_beacon_committees_at_slot(self.chain.slot().unwrap()) + .unwrap(); + let attesting_validators: Vec = committees + .into_iter() + .map(|committee| committee.committee.iter().cloned()) + .flatten() + .collect(); + // All attesters should now be considered live + let expected = expected + .into_iter() + .map(|mut a| { + if attesting_validators.contains(&(a.index as usize)) { + a.is_live = true; + } + a + }) + .collect::>(); + + assert_eq!(result, expected); + + self + } + pub async fn test_get_events(self) -> Self { // Subscribe to all events let topics = vec![ @@ -2635,5 +2700,7 @@ async fn lighthouse_endpoints() { .test_get_lighthouse_beacon_states_ssz() .await .test_get_lighthouse_staking() + .await + .test_post_lighthouse_liveness() .await; } diff --git a/beacon_node/http_metrics/Cargo.toml 
b/beacon_node/http_metrics/Cargo.toml index e1746781bf1..aabf96b8273 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -23,7 +23,7 @@ warp_utils = { path = "../../common/warp_utils" } malloc_utils = { path = "../../common/malloc_utils" } [dev-dependencies] -tokio = { version = "1.1.0", features = ["sync"] } +tokio = { version = "1.7.1", features = ["sync"] } reqwest = { version = "0.11.0", features = ["json"] } environment = { path = "../../lighthouse/environment" } types = { path = "../../consensus/types" } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 10808c58eb3..63990a54c88 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -15,7 +15,6 @@ slog-term = "2.6.0" slog-async = "2.5.0" logging = { path = "../../common/logging" } environment = { path = "../../lighthouse/environment" } -discv5 = { version = "0.1.0-beta.3" } [dependencies] beacon_chain = { path = "../beacon_chain" } @@ -32,7 +31,7 @@ eth2_ssz_types = { path = "../../consensus/ssz_types" } tree_hash = "0.1.1" futures = "0.3.7" error-chain = "0.12.4" -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } tokio-stream = "0.1.3" parking_lot = "0.11.0" smallvec = "1.6.1" diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 50050c5be50..617b9a87153 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -8,8 +8,8 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; -use discv5::enr::{CombinedKey, EnrBuilder}; use environment::{null_logger, Environment, EnvironmentBuilder}; +use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder}; use eth2_libp2p::{rpc::methods::MetaData, types::EnrBitfield, MessageId, NetworkGlobals, PeerId}; use slot_clock::SlotClock; use std::cmp; @@ -553,14 +553,14 @@ fn import_gossip_block_at_current_slot() { fn import_gossip_attestation() { let mut rig = TestRig::new(SMALL_CHAIN); - let initial_attns = rig.chain.naive_aggregation_pool.read().num_attestations(); + let initial_attns = rig.chain.naive_aggregation_pool.read().num_items(); rig.enqueue_unaggregated_attestation(); rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); assert_eq!( - rig.chain.naive_aggregation_pool.read().num_attestations(), + rig.chain.naive_aggregation_pool.read().num_items(), initial_attns + 1, "op pool should have one more attestation" ); @@ -578,14 +578,14 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { // Send the attestation but not the block, and check that it was not imported. - let initial_attns = rig.chain.naive_aggregation_pool.read().num_attestations(); + let initial_attns = rig.chain.naive_aggregation_pool.read().num_items(); rig.enqueue_next_block_unaggregated_attestation(); rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); assert_eq!( - rig.chain.naive_aggregation_pool.read().num_attestations(), + rig.chain.naive_aggregation_pool.read().num_items(), initial_attns, "Attestation should not have been included." 
); @@ -616,7 +616,7 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { ); assert_eq!( - rig.chain.naive_aggregation_pool.read().num_attestations(), + rig.chain.naive_aggregation_pool.read().num_items(), initial_attns + 1, "Attestation should have been included." ); @@ -707,14 +707,14 @@ fn requeue_unknown_block_gossip_attestation_without_import() { // Send the attestation but not the block, and check that it was not imported. - let initial_attns = rig.chain.naive_aggregation_pool.read().num_attestations(); + let initial_attns = rig.chain.naive_aggregation_pool.read().num_items(); rig.enqueue_next_block_unaggregated_attestation(); rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); assert_eq!( - rig.chain.naive_aggregation_pool.read().num_attestations(), + rig.chain.naive_aggregation_pool.read().num_items(), initial_attns, "Attestation should not have been included." ); @@ -727,7 +727,7 @@ fn requeue_unknown_block_gossip_attestation_without_import() { ); assert_eq!( - rig.chain.naive_aggregation_pool.read().num_attestations(), + rig.chain.naive_aggregation_pool.read().num_items(), initial_attns, "Attestation should not have been included." ); @@ -748,7 +748,7 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); assert_eq!( - rig.chain.naive_aggregation_pool.read().num_attestations(), + rig.chain.naive_aggregation_pool.read().num_items(), initial_attns, "Attestation should not have been included." ); diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index f705d73f147..8813443b7b9 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -10,7 +10,7 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, }; use eth2_libp2p::{MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; -use slog::{crit, debug, error, info, trace, warn}; +use slog::{debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; use std::time::{Duration, SystemTime, UNIX_EPOCH}; diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs index 8c476590f40..2acfb1fb5f2 100644 --- a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ b/beacon_node/network/src/beacon_processor/worker/mod.rs @@ -1,7 +1,7 @@ use super::work_reprocessing_queue::ReprocessQueueMessage; use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{error, Logger}; +use slog::{debug, Logger}; use std::sync::Arc; use tokio::sync::mpsc; @@ -28,7 +28,7 @@ impl Worker { /// Creates a log if there is an internal error. fn send_sync_message(&self, message: SyncMessage) { self.sync_tx.send(message).unwrap_or_else(|e| { - error!(self.log, "Could not send message to the sync service"; + debug!(self.log, "Could not send message to the sync service, likely shutdown"; "error" => %e) }); } @@ -38,7 +38,7 @@ impl Worker { /// Creates a log if there is an internal error. 
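These `error!` to `debug!` downgrades reflect that a failed send here almost always means the receiving service has already shut down and dropped its end of the channel, so the failure is routine rather than exceptional. A tiny sketch of that failure mode using `tokio::sync::mpsc` directly (not the worker types), after which `send_network_message` continues below:

```rust
use tokio::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::unbounded_channel::<u32>();
    // During shutdown the receiving service drops its end first; any
    // in-flight worker send then fails. That is expected, which is why the
    // worker now logs it at `debug!` rather than `error!`.
    drop(rx);
    assert!(tx.send(1).is_err());
}
```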
fn send_network_message(&self, message: NetworkMessage) { self.network_tx.send(message).unwrap_or_else(|e| { - error!(self.log, "Could not send message to the network service"; + debug!(self.log, "Could not send message to the network service, likely shutdown"; "error" => %e) }); } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index a201a97d83b..bc0537e28e6 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -410,7 +410,7 @@ pub fn expose_publish_metrics(messages: &[PubsubMessage]) { PubsubMessage::Attestation(subnet_id) => { inc_counter_vec( &ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT, - &[&subnet_id.0.as_ref()], + &[subnet_id.0.as_ref()], ); inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_TX) } @@ -577,7 +577,7 @@ pub fn update_gossip_metrics( // mesh peers for topic_hash in gossipsub.topics() { - let peers = gossipsub.mesh_peers(&topic_hash).count(); + let peers = gossipsub.mesh_peers(topic_hash).count(); if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { match topic.kind() { GossipKind::Attestation(subnet_id) => { @@ -633,7 +633,7 @@ pub fn update_gossip_metrics( if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { match topic.kind() { GossipKind::BeaconBlock => { - for peer in gossipsub.mesh_peers(&topic_hash) { + for peer in gossipsub.mesh_peers(topic_hash) { if let Some(client) = peer_to_client.get(peer) { if let Some(v) = get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) @@ -644,7 +644,7 @@ pub fn update_gossip_metrics( } } GossipKind::BeaconAggregateAndProof => { - for peer in gossipsub.mesh_peers(&topic_hash) { + for peer in gossipsub.mesh_peers(topic_hash) { if let Some(client) = peer_to_client.get(peer) { if let Some(v) = get_int_gauge( &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 59b0bd9ab24..881be15a7f8 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -27,6 +27,13 @@ pub fn persist_dht, Cold: ItemStore>( store.put_item(&DHT_DB_KEY, &PersistedDht { enrs }) } +/// Attempts to clear any DHT entries. +pub fn clear_dht, Cold: ItemStore>( + store: Arc>, +) -> Result<(), store::Error> { + store.hot_db.delete::(&DHT_DB_KEY) +} + /// Wrapper around DHT for persistence to disk. 
pub struct PersistedDht { pub enrs: Vec, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 75786cdc510..1f94c387d01 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,4 +1,4 @@ -use crate::persisted_dht::{load_dht, persist_dht}; +use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::{ attestation_service::{AttServiceMessage, AttestationService}, @@ -178,7 +178,7 @@ impl NetworkService { "Loading peers into the routing table"; "peers" => enrs_to_load.len() ); for enr in enrs_to_load { - libp2p.swarm.add_enr(enr.clone()); + libp2p.swarm.behaviour_mut().add_enr(enr.clone()); } } @@ -195,7 +195,7 @@ impl NetworkService { // attestation service let attestation_service = - AttestationService::new(beacon_chain.clone(), &config, &network_log); + AttestationService::new(beacon_chain.clone(), config, &network_log); // create a timer for updating network metrics let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL)); @@ -251,7 +251,7 @@ fn spawn_service( .map(|gauge| gauge.reset()); } metrics::update_gossip_metrics::( - &service.libp2p.swarm.gs(), + service.libp2p.swarm.behaviour_mut().gs(), &service.network_globals, ); // update sync metrics @@ -287,8 +287,7 @@ fn spawn_service( }) ) }).unwrap_or(None) { - if (*service.libp2p.swarm) - .update_gossipsub_parameters(active_validators, slot).is_err() { + if service.libp2p.swarm.behaviour_mut().update_gossipsub_parameters(active_validators, slot).is_err() { error!( service.log, "Failed to update gossipsub parameters"; @@ -314,7 +313,7 @@ fn spawn_service( service.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); // If there is an external TCP port update, modify our local ENR. 
if let Some(tcp_socket) = tcp_socket { - if let Err(e) = service.libp2p.swarm.peer_manager().discovery_mut().update_enr_tcp_port(tcp_socket.port()) { + if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_tcp_port(tcp_socket.port()) { warn!(service.log, "Failed to update ENR"; "error" => e); } } @@ -322,7 +321,7 @@ fn spawn_service( // UPnP mappings if !service.discovery_auto_update { if let Some(udp_socket) = udp_socket { - if let Err(e) = service.libp2p.swarm.peer_manager().discovery_mut().update_enr_udp_socket(udp_socket) { + if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_udp_socket(udp_socket) { warn!(service.log, "Failed to update ENR"; "error" => e); } } @@ -341,6 +340,7 @@ fn spawn_service( service .libp2p .swarm + .behaviour_mut() .report_message_validation_result( &propagation_source, message_id, validation_result ); @@ -359,7 +359,7 @@ fn spawn_service( "topics" => ?topic_kinds ); metrics::expose_publish_metrics(&messages); - service.libp2p.swarm.publish(messages); + service.libp2p.swarm.behaviour_mut().publish(messages); } NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source), NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source), @@ -375,7 +375,7 @@ fn spawn_service( let already_subscribed = service.network_globals.gossipsub_subscriptions.read().clone(); let already_subscribed = already_subscribed.iter().map(|x| x.kind()).collect::>(); for topic_kind in eth2_libp2p::types::CORE_TOPICS.iter().filter(|topic| already_subscribed.get(topic).is_none()) { - if service.libp2p.swarm.subscribe_kind(topic_kind.clone()) { + if service.libp2p.swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { subscribed_topics.push(topic_kind.clone()); } else { warn!(service.log, "Could not subscribe to topic"; "topic" => %topic_kind); @@ -387,9 +387,9 @@ fn spawn_service( for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { let subnet_id = SubnetId::new(subnet_id); let topic_kind = eth2_libp2p::types::GossipKind::Attestation(subnet_id); - if service.libp2p.swarm.subscribe_kind(topic_kind.clone()) { + if service.libp2p.swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { // Update the ENR bitfield. 
- service.libp2p.swarm.update_enr_subnet(subnet_id, true); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, true); subscribed_topics.push(topic_kind.clone()); } else { warn!(service.log, "Could not subscribe to topic"; "topic" => %topic_kind); @@ -407,19 +407,19 @@ fn spawn_service( Some(attestation_service_message) = service.attestation_service.next() => { match attestation_service_message { AttServiceMessage::Subscribe(subnet_id) => { - service.libp2p.swarm.subscribe_to_subnet(subnet_id); + service.libp2p.swarm.behaviour_mut().subscribe_to_subnet(subnet_id); } AttServiceMessage::Unsubscribe(subnet_id) => { - service.libp2p.swarm.unsubscribe_from_subnet(subnet_id); + service.libp2p.swarm.behaviour_mut().unsubscribe_from_subnet(subnet_id); } AttServiceMessage::EnrAdd(subnet_id) => { - service.libp2p.swarm.update_enr_subnet(subnet_id, true); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, true); } AttServiceMessage::EnrRemove(subnet_id) => { - service.libp2p.swarm.update_enr_subnet(subnet_id, false); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, false); } AttServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.discover_subnet_peers(subnets_to_discover); + service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); } } } @@ -427,17 +427,15 @@ fn spawn_service( // poll the swarm match libp2p_event { Libp2pEvent::Behaviour(event) => match event { - - BehaviourEvent::PeerDialed(peer_id) => { + BehaviourEvent::PeerConnectedOutgoing(peer_id) => { let _ = service .router_send .send(RouterMessage::PeerDialed(peer_id)) .map_err(|_| { debug!(service.log, "Failed to send peer dialed to router"); }); }, - BehaviourEvent::PeerConnected(_peer_id) => { - // A peer has connected to us - // We currently do not perform any action here. + BehaviourEvent::PeerConnectedIncoming(_) | BehaviourEvent::PeerBanned(_) | BehaviourEvent::PeerUnbanned(_) => { + // No action required for these events. }, BehaviourEvent::PeerDisconnected(peer_id) => { let _ = service @@ -541,6 +539,7 @@ fn spawn_service( service .libp2p .swarm + .behaviour_mut() .update_fork_version(service.beacon_chain.enr_fork_id()); service.next_fork_update = next_fork_delay(&service.beacon_chain); } @@ -566,12 +565,16 @@ fn next_fork_delay( impl Drop for NetworkService { fn drop(&mut self) { // network thread is terminating - let enrs = self.libp2p.swarm.enr_entries(); + let enrs = self.libp2p.swarm.behaviour_mut().enr_entries(); debug!( self.log, "Persisting DHT to store"; "Number of peers" => enrs.len(), ); + if let Err(e) = clear_dht::(self.store.clone()) { + error!(self.log, "Failed to clear old DHT entries"; "error" => ?e); + } + // Still try to update new entries match persist_dht::(self.store.clone(), enrs) { Err(e) => error!( self.log, diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 731009032ed..5f411260ffe 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -126,7 +126,7 @@ impl BatchInfo { BatchState::Downloading(peer_id, _, _) | BatchState::AwaitingProcessing(peer_id, _) | BatchState::Processing(Attempt { peer_id, .. }) - | BatchState::AwaitingValidation(Attempt { peer_id, .. }) => Some(&peer_id), + | BatchState::AwaitingValidation(Attempt { peer_id, .. 
            }) => Some(peer_id),
            BatchState::Poisoned => unreachable!("Poisoned batch"),
        }
    }
diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml
index 219932c84b4..32115913372 100644
--- a/beacon_node/operation_pool/Cargo.toml
+++ b/beacon_node/operation_pool/Cargo.toml
@@ -5,6 +5,7 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"]
 edition = "2018"
 
 [dependencies]
+derivative = "2.1.1"
 itertools = "0.10.0"
 int_to_bytes = { path = "../../consensus/int_to_bytes" }
 lazy_static = "1.4.0"
@@ -18,6 +19,7 @@ rayon = "1.5.0"
 serde = "1.0.116"
 serde_derive = "1.0.116"
 store = { path = "../store" }
+superstruct = "0.2.0"
 
 [dev-dependencies]
 rand = "0.7.3"
diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs
index 85faaa1f4d5..f35f12f2cd8 100644
--- a/beacon_node/operation_pool/src/lib.rs
+++ b/beacon_node/operation_pool/src/lib.rs
@@ -4,9 +4,13 @@ mod attester_slashing;
 mod max_cover;
 mod metrics;
 mod persistence;
+mod sync_aggregate_id;
 
-pub use persistence::PersistedOperationPool;
+pub use persistence::{
+    PersistedOperationPool, PersistedOperationPoolAltair, PersistedOperationPoolBase,
+};
 
+use crate::sync_aggregate_id::SyncAggregateId;
 use attestation::AttMaxCover;
 use attestation_id::AttestationId;
 use attester_slashing::AttesterSlashingMaxCover;
@@ -18,18 +22,24 @@ use state_processing::per_block_processing::{
     VerifySignatures,
 };
 use state_processing::SigVerifiedOp;
-use std::collections::{hash_map, HashMap, HashSet};
+use std::collections::{hash_map::Entry, HashMap, HashSet};
 use std::marker::PhantomData;
 use std::ptr;
 use types::{
-    typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec,
-    Epoch, EthSpec, Fork, ForkVersion, Hash256, ProposerSlashing, RelativeEpoch,
-    SignedVoluntaryExit, Validator,
+    sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttesterSlashing,
+    BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, Hash256,
+    ProposerSlashing, RelativeEpoch, SignedVoluntaryExit, Slot, SyncAggregate,
+    SyncCommitteeContribution, Validator,
 };
 
+type SyncContributions<T> = RwLock<HashMap<SyncAggregateId, Vec<SyncCommitteeContribution<T>>>>;
+
 #[derive(Default, Debug)]
 pub struct OperationPool<T: EthSpec + Default> {
     /// Map from attestation ID (see below) to vectors of attestations.
     attestations: RwLock<HashMap<AttestationId, Vec<Attestation<T>>>>,
+    /// Map from sync aggregate ID to the best `SyncCommitteeContribution`s seen for that ID.
+    sync_contributions: SyncContributions<T>,
     /// Set of attester slashings, and the fork version they were verified against.
     attester_slashings: RwLock<HashSet<(AttesterSlashing<T>, ForkVersion)>>,
     /// Map from proposer index to slashing.
@@ -42,6 +52,15 @@ pub struct OperationPool<T: EthSpec + Default> {
 #[derive(Debug, PartialEq)]
 pub enum OpPoolError {
     GetAttestationsTotalBalanceError(BeaconStateError),
+    GetBlockRootError(BeaconStateError),
+    SyncAggregateError(SyncAggregateError),
+    IncorrectOpPoolVariant,
+}
+
+impl From<SyncAggregateError> for OpPoolError {
+    fn from(e: SyncAggregateError) -> Self {
+        OpPoolError::SyncAggregateError(e)
+    }
 }
 
 impl<T: EthSpec> OperationPool<T> {
@@ -50,6 +69,97 @@ impl<T: EthSpec> OperationPool<T> {
         Self::default()
     }
 
+    /// Insert a sync contribution into the pool. We don't aggregate these contributions until they
+    /// are retrieved from the pool.
+    ///
+    /// ## Note
+    ///
+    /// This function assumes the given `contribution` is valid.
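Before the function itself (which follows), here is a pared-down sketch of the policy `insert_sync_contribution` implements: contributions are bucketed by `(slot, beacon_block_root)`, and within a bucket at most one contribution is kept per `subcommittee_index`, preferring whichever has more aggregation bits set. All types below are hypothetical stand-ins, not the real Lighthouse types:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for `SyncCommitteeContribution`.
struct Contribution {
    subcommittee_index: u64,
    num_set_bits: usize, // stand-in for `aggregation_bits.num_set_bits()`
}

// Stand-in for `SyncAggregateId`: (slot, beacon_block_root).
type PoolKey = (u64, [u8; 32]);

fn insert(pool: &mut HashMap<PoolKey, Vec<Contribution>>, key: PoolKey, new: Contribution) {
    let bucket = pool.entry(key).or_default();
    let subcommittee = new.subcommittee_index;
    match bucket
        .iter_mut()
        .find(|c| c.subcommittee_index == subcommittee)
    {
        // Replace only when the newcomer strictly improves coverage.
        Some(existing) if existing.num_set_bits < new.num_set_bits => *existing = new,
        // An equal-or-better contribution is already in the bucket.
        Some(_) => {}
        // First contribution seen for this subcommittee: keep it.
        None => bucket.push(new),
    }
}

fn main() {
    let mut pool = HashMap::new();
    let key = (1, [0u8; 32]);
    insert(&mut pool, key, Contribution { subcommittee_index: 0, num_set_bits: 3 });
    insert(&mut pool, key, Contribution { subcommittee_index: 0, num_set_bits: 5 });
    assert_eq!(pool[&key][0].num_set_bits, 5); // the denser contribution won
}
```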
+ pub fn insert_sync_contribution( + &self, + contribution: SyncCommitteeContribution, + ) -> Result<(), OpPoolError> { + let aggregate_id = SyncAggregateId::new(contribution.slot, contribution.beacon_block_root); + let mut contributions = self.sync_contributions.write(); + + match contributions.entry(aggregate_id) { + Entry::Vacant(entry) => { + // If no contributions exist for the key, insert the given contribution. + entry.insert(vec![contribution]); + } + Entry::Occupied(mut entry) => { + // If contributions exists for this key, check whether there exists a contribution + // with a matching `subcommittee_index`. If one exists, check whether the new or + // old contribution has more aggregation bits set. If the new one does, add it to the + // pool in place of the old one. + let existing_contributions = entry.get_mut(); + match existing_contributions + .iter_mut() + .find(|existing_contribution| { + existing_contribution.subcommittee_index == contribution.subcommittee_index + }) { + Some(existing_contribution) => { + // Only need to replace the contribution if the new contribution has more + // bits set. + if existing_contribution.aggregation_bits.num_set_bits() + < contribution.aggregation_bits.num_set_bits() + { + *existing_contribution = contribution; + } + } + None => { + // If there has been no previous sync contribution for this subcommittee index, + // add it to the pool. + existing_contributions.push(contribution); + } + } + } + }; + Ok(()) + } + + /// Calculate the `SyncAggregate` from the sync contributions that exist in the pool for the + /// slot previous to the slot associated with `state`. Return the calculated `SyncAggregate` if + /// contributions exist at this slot, or else `None`. + pub fn get_sync_aggregate( + &self, + state: &BeaconState, + ) -> Result>, OpPoolError> { + // Sync aggregates are formed from the contributions from the previous slot. + let slot = state.slot().saturating_sub(1u64); + let block_root = *state + .get_block_root(slot) + .map_err(OpPoolError::GetBlockRootError)?; + let id = SyncAggregateId::new(slot, block_root); + self.sync_contributions + .read() + .get(&id) + .map(|contributions| SyncAggregate::from_contributions(contributions)) + .transpose() + .map_err(|e| e.into()) + } + + /// Total number of sync contributions in the pool. + pub fn num_sync_contributions(&self) -> usize { + self.sync_contributions + .read() + .values() + .map(|contributions| contributions.len()) + .sum() + } + + /// Remove sync contributions which are too old to be included in a block. + pub fn prune_sync_contributions(&self, current_slot: Slot) { + // Prune sync contributions that are from before the previous slot. + self.sync_contributions.write().retain(|_, contributions| { + // All the contributions in this bucket have the same data, so we only need to + // check the first one. + contributions.first().map_or(false, |contribution| { + current_slot <= contribution.slot.saturating_add(Slot::new(1)) + }) + }); + } + /// Insert an attestation into the pool, aggregating it with existing attestations if possible. 
/// /// ## Note @@ -68,11 +178,11 @@ impl OperationPool { let mut attestations = self.attestations.write(); let existing_attestations = match attestations.entry(id) { - hash_map::Entry::Vacant(entry) => { + Entry::Vacant(entry) => { entry.insert(vec![attestation]); return Ok(()); } - hash_map::Entry::Occupied(entry) => entry.into_mut(), + Entry::Occupied(entry) => entry.into_mut(), }; let mut aggregated = false; @@ -273,7 +383,7 @@ impl OperationPool { let relevant_attester_slashings = reader.iter().flat_map(|(slashing, fork)| { if *fork == state.fork().previous_version || *fork == state.fork().current_version { - AttesterSlashingMaxCover::new(&slashing, &to_be_slashed, state) + AttesterSlashingMaxCover::new(slashing, &to_be_slashed, state) } else { None } @@ -376,6 +486,7 @@ impl OperationPool { /// Prune all types of transactions given the latest head state and head fork. pub fn prune_all(&self, head_state: &BeaconState, current_epoch: Epoch) { self.prune_attestations(current_epoch); + self.prune_sync_contributions(head_state.slot()); self.prune_proposer_slashings(head_state); self.prune_attester_slashings(head_state); self.prune_voluntary_exits(head_state); @@ -498,18 +609,19 @@ impl PartialEq for OperationPool { #[cfg(all(test, not(debug_assertions)))] mod release_tests { - use lazy_static::lazy_static; - use super::attestation::earliest_attestation_validators; use super::*; - use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use beacon_chain::test_utils::{ + BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, + }; + use lazy_static::lazy_static; use state_processing::{ common::{base::get_base_reward, get_attesting_indices}, VerifyOperation, }; use std::collections::BTreeSet; use std::iter::FromIterator; - use store::StoreConfig; + use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::*; pub const MAX_VALIDATOR_COUNT: usize = 4 * 32 * 128; @@ -521,13 +633,10 @@ mod release_tests { fn get_harness( validator_count: usize, + spec: Option, ) -> BeaconChainHarness> { - let harness = BeaconChainHarness::new_with_store_config( - E::default(), - None, - KEYPAIRS[0..validator_count].to_vec(), - StoreConfig::default(), - ); + let harness = + BeaconChainHarness::new(E::default(), spec, KEYPAIRS[0..validator_count].to_vec()); harness.advance_slot(); @@ -542,14 +651,30 @@ mod release_tests { let num_validators = num_committees * E::slots_per_epoch() as usize * spec.target_committee_size; - let harness = get_harness::(num_validators); + let harness = get_harness::(num_validators, None); - let slot_offset = 5 * E::slots_per_epoch() + E::slots_per_epoch() / 2; + (harness, spec) + } - // advance until we have finalized and justified epochs - for _ in 0..slot_offset { - harness.advance_slot(); - } + /// Test state for sync contribution-related tests. 
+ fn sync_contribution_test_state( + num_committees: usize, + ) -> (BeaconChainHarness>, ChainSpec) { + let mut spec = E::default_spec(); + + spec.altair_fork_epoch = Some(Epoch::new(0)); + + let num_validators = + num_committees * E::slots_per_epoch() as usize * spec.target_committee_size; + let harness = get_harness::(num_validators, Some(spec.clone())); + + let state = harness.get_current_state(); + harness.add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1)], + (0..num_validators).collect::>().as_slice(), + ); (harness, spec) } @@ -558,7 +683,7 @@ mod release_tests { fn test_earliest_attestation() { let (harness, ref spec) = attestation_test_state::(1); let mut state = harness.get_current_state(); - let slot = state.slot() - 1; + let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -629,7 +754,7 @@ mod release_tests { let op_pool = OperationPool::::new(); let mut state = harness.get_current_state(); - let slot = state.slot() - 1; + let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -666,7 +791,6 @@ mod release_tests { assert_eq!(op_pool.num_attestations(), committees.len()); // Before the min attestation inclusion delay, get_attestations shouldn't return anything. - *state.slot_mut() -= 1; assert_eq!( op_pool .get_attestations(&state, |_| true, |_| true, spec) @@ -709,7 +833,7 @@ mod release_tests { let op_pool = OperationPool::::new(); - let slot = state.slot() - 1; + let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -755,7 +879,7 @@ mod release_tests { let op_pool = OperationPool::::new(); - let slot = state.slot() - 1; + let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -852,7 +976,7 @@ mod release_tests { let op_pool = OperationPool::::new(); - let slot = state.slot() - 1; + let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -941,7 +1065,7 @@ mod release_tests { let mut state = harness.get_current_state(); let op_pool = OperationPool::::new(); - let slot = state.slot() - 1; + let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -1071,7 +1195,7 @@ mod release_tests { /// Insert two slashings for the same proposer and ensure only one is returned. 
#[test] fn duplicate_proposer_slashing() { - let harness = get_harness(32); + let harness = get_harness(32, None); let state = harness.get_current_state(); let op_pool = OperationPool::::new(); @@ -1096,7 +1220,7 @@ mod release_tests { // Sanity check on the pruning of proposer slashings #[test] fn prune_proposer_slashing_noop() { - let harness = get_harness(32); + let harness = get_harness(32, None); let state = harness.get_current_state(); let op_pool = OperationPool::::new(); @@ -1109,7 +1233,7 @@ mod release_tests { // Sanity check on the pruning of attester slashings #[test] fn prune_attester_slashing_noop() { - let harness = get_harness(32); + let harness = get_harness(32, None); let spec = &harness.spec; let state = harness.get_current_state(); let op_pool = OperationPool::::new(); @@ -1126,7 +1250,7 @@ mod release_tests { // Check that we get maximum coverage for attester slashings (highest qty of validators slashed) #[test] fn simple_max_cover_attester_slashing() { - let harness = get_harness(32); + let harness = get_harness(32, None); let spec = &harness.spec; let state = harness.get_current_state(); let op_pool = OperationPool::::new(); @@ -1160,7 +1284,7 @@ mod release_tests { // Check that we get maximum coverage for attester slashings with overlapping indices #[test] fn overlapping_max_cover_attester_slashing() { - let harness = get_harness(32); + let harness = get_harness(32, None); let spec = &harness.spec; let state = harness.get_current_state(); let op_pool = OperationPool::::new(); @@ -1194,7 +1318,7 @@ mod release_tests { // Max coverage of attester slashings taking into account proposer slashings #[test] fn max_coverage_attester_proposer_slashings() { - let harness = get_harness(32); + let harness = get_harness(32, None); let spec = &harness.spec; let state = harness.get_current_state(); let op_pool = OperationPool::::new(); @@ -1225,7 +1349,7 @@ mod release_tests { //Max coverage checking that non overlapping indices are still recognized for their value #[test] fn max_coverage_different_indices_set() { - let harness = get_harness(32); + let harness = get_harness(32, None); let spec = &harness.spec; let state = harness.get_current_state(); let op_pool = OperationPool::::new(); @@ -1257,7 +1381,7 @@ mod release_tests { //Max coverage should be affected by the overall effective balances #[test] fn max_coverage_effective_balances() { - let harness = get_harness(32); + let harness = get_harness(32, None); let spec = &harness.spec; let mut state = harness.get_current_state(); let op_pool = OperationPool::::new(); @@ -1285,4 +1409,268 @@ mod release_tests { let best_slashings = op_pool.get_slashings(&state); assert_eq!(best_slashings.1, vec![slashing_2, slashing_3]); } + + /// End-to-end test of basic sync contribution handling. 
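The end-to-end test that follows relies on `SyncAggregate::from_contributions` folding the per-subcommittee contributions into one committee-wide bitfield (mainnet has `SYNC_COMMITTEE_SUBNET_COUNT` = 4 subcommittees). A hedged sketch of that folding step, with BLS signature aggregation elided and a deliberately tiny committee:

```rust
// Sketch only: fold per-subcommittee bitfields into one committee-wide
// bitfield by offsetting each subcommittee's bits. The real
// `SyncAggregate::from_contributions` also aggregates the BLS signatures.
fn fold_contributions(
    subcommittee_size: usize,
    subcommittee_count: usize,
    contributions: &[(usize, Vec<bool>)], // (subcommittee_index, aggregation bits)
) -> Vec<bool> {
    let mut committee_bits = vec![false; subcommittee_size * subcommittee_count];
    for (index, bits) in contributions {
        let offset = *index * subcommittee_size;
        for (i, bit) in bits.iter().enumerate() {
            // OR the bits so overlapping contributions never unset a validator.
            committee_bits[offset + i] |= *bit;
        }
    }
    committee_bits
}

fn main() {
    // Two subcommittees of four validators; only subcommittee 1 contributed.
    let agg = fold_contributions(4, 2, &[(1, vec![true, false, true, false])]);
    assert_eq!(agg.iter().filter(|b| **b).count(), 2);
    assert!(agg[4] && agg[6]);
}
```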
+ #[test] + fn sync_contribution_aggregation_insert_get_prune() { + let (harness, _) = sync_contribution_test_state::(1); + + let op_pool = OperationPool::::new(); + let state = harness.get_current_state(); + + let block_root = *state + .get_block_root(state.slot() - Slot::new(1)) + .ok() + .expect("block root should exist at slot"); + let contributions = harness.make_sync_contributions( + &state, + block_root, + state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + for (_, contribution_and_proof) in contributions { + let contribution = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + op_pool.insert_sync_contribution(contribution).unwrap(); + } + + assert_eq!(op_pool.sync_contributions.read().len(), 1); + assert_eq!( + op_pool.num_sync_contributions(), + SYNC_COMMITTEE_SUBNET_COUNT as usize + ); + + let sync_aggregate = op_pool + .get_sync_aggregate(&state) + .expect("Should calculate the sync aggregate") + .expect("Should have block sync aggregate"); + assert_eq!( + sync_aggregate.sync_committee_bits.num_set_bits(), + MainnetEthSpec::sync_committee_size() + ); + + // Prune sync contributions shouldn't do anything at this point. + op_pool.prune_sync_contributions(state.slot() - Slot::new(1)); + assert_eq!( + op_pool.num_sync_contributions(), + SYNC_COMMITTEE_SUBNET_COUNT as usize + ); + op_pool.prune_sync_contributions(state.slot()); + assert_eq!( + op_pool.num_sync_contributions(), + SYNC_COMMITTEE_SUBNET_COUNT as usize + ); + + // But once we advance to more than one slot after the contribution, it should prune it + // out of existence. + op_pool.prune_sync_contributions(state.slot() + Slot::new(1)); + assert_eq!(op_pool.num_sync_contributions(), 0); + } + + /// Adding a sync contribution already in the pool should not increase the size of the pool. + #[test] + fn sync_contribution_duplicate() { + let (harness, _) = sync_contribution_test_state::(1); + + let op_pool = OperationPool::::new(); + let state = harness.get_current_state(); + let block_root = *state + .get_block_root(state.slot() - Slot::new(1)) + .ok() + .expect("block root should exist at slot"); + let contributions = harness.make_sync_contributions( + &state, + block_root, + state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + for (_, contribution_and_proof) in contributions { + let contribution = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + op_pool.insert_sync_contribution(contribution).unwrap(); + } + + assert_eq!(op_pool.sync_contributions.read().len(), 1); + assert_eq!( + op_pool.num_sync_contributions(), + SYNC_COMMITTEE_SUBNET_COUNT as usize + ); + } + + /// Adding a sync contribution already in the pool with more bits set should increase the + /// number of bits set in the aggregate. 
+ #[test] + fn sync_contribution_with_more_bits() { + let (harness, _) = sync_contribution_test_state::(1); + + let op_pool = OperationPool::::new(); + let state = harness.get_current_state(); + let block_root = *state + .get_block_root(state.slot() - Slot::new(1)) + .ok() + .expect("block root should exist at slot"); + let contributions = harness.make_sync_contributions( + &state, + block_root, + state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + let expected_bits = MainnetEthSpec::sync_committee_size() - (2 * contributions.len()); + let mut first_contribution = contributions[0] + .1 + .as_ref() + .unwrap() + .message + .contribution + .clone(); + + // Add all contributions, but unset the first two bits of each. + for (_, contribution_and_proof) in contributions { + let mut contribution_fewer_bits = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + + // Unset the first two bits of each contribution. + contribution_fewer_bits + .aggregation_bits + .set(0, false) + .expect("set bit"); + contribution_fewer_bits + .aggregation_bits + .set(1, false) + .expect("set bit"); + + op_pool + .insert_sync_contribution(contribution_fewer_bits) + .unwrap(); + } + + let sync_aggregate = op_pool + .get_sync_aggregate(&state) + .expect("Should calculate the sync aggregate") + .expect("Should have block sync aggregate"); + assert_eq!( + sync_aggregate.sync_committee_bits.num_set_bits(), + expected_bits + ); + + // Unset the first bit of the first contribution and re-insert it. This should increase the + // number of bits set in the sync aggregate by one. + first_contribution + .aggregation_bits + .set(0, false) + .expect("set bit"); + op_pool + .insert_sync_contribution(first_contribution) + .unwrap(); + + // The sync aggregate should now include the additional set bit. + let sync_aggregate = op_pool + .get_sync_aggregate(&state) + .expect("Should calculate the sync aggregate") + .expect("Should have block sync aggregate"); + assert_eq!( + sync_aggregate.sync_committee_bits.num_set_bits(), + expected_bits + 1 + ); + } + + /// Adding a sync contribution already in the pool with fewer bits set should not increase the + /// number of bits set in the aggregate. + #[test] + fn sync_contribution_with_fewer_bits() { + let (harness, _) = sync_contribution_test_state::(1); + + let op_pool = OperationPool::::new(); + let state = harness.get_current_state(); + let block_root = *state + .get_block_root(state.slot() - Slot::new(1)) + .ok() + .expect("block root should exist at slot"); + let contributions = harness.make_sync_contributions( + &state, + block_root, + state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + let expected_bits = MainnetEthSpec::sync_committee_size() - (2 * contributions.len()); + let mut first_contribution = contributions[0] + .1 + .as_ref() + .unwrap() + .message + .contribution + .clone(); + + // Add all contributions, but unset the first two bits of each. + for (_, contribution_and_proof) in contributions { + let mut contribution_fewer_bits = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + + // Unset the first two bits of each contribution. 
+ contribution_fewer_bits + .aggregation_bits + .set(0, false) + .expect("set bit"); + contribution_fewer_bits + .aggregation_bits + .set(1, false) + .expect("set bit"); + + op_pool + .insert_sync_contribution(contribution_fewer_bits) + .unwrap(); + } + + let sync_aggregate = op_pool + .get_sync_aggregate(&state) + .expect("Should calculate the sync aggregate") + .expect("Should have block sync aggregate"); + assert_eq!( + sync_aggregate.sync_committee_bits.num_set_bits(), + expected_bits + ); + + // Unset the first three bits of the first contribution and re-insert it. This should + // not affect the number of bits set in the sync aggregate. + first_contribution + .aggregation_bits + .set(0, false) + .expect("set bit"); + first_contribution + .aggregation_bits + .set(1, false) + .expect("set bit"); + first_contribution + .aggregation_bits + .set(2, false) + .expect("set bit"); + op_pool + .insert_sync_contribution(first_contribution) + .unwrap(); + + // The sync aggregate should still have the same number of set bits. + let sync_aggregate = op_pool + .get_sync_aggregate(&state) + .expect("Should calculate the sync aggregate") + .expect("Should have block sync aggregate"); + assert_eq!( + sync_aggregate.sync_committee_bits.num_set_bits(), + expected_bits + ); + } } diff --git a/beacon_node/operation_pool/src/max_cover.rs b/beacon_node/operation_pool/src/max_cover.rs index be0d4f746f3..9f9adbb8216 100644 --- a/beacon_node/operation_pool/src/max_cover.rs +++ b/beacon_node/operation_pool/src/max_cover.rs @@ -121,7 +121,7 @@ mod test { } fn covering_set(&self) -> &Self { - &self + self } fn update_covering_set(&mut self, _: &Self, other: &Self) { diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 306f05e2d82..70999c53a58 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,5 +1,8 @@ use crate::attestation_id::AttestationId; +use crate::sync_aggregate_id::SyncAggregateId; +use crate::OpPoolError; use crate::OperationPool; +use derivative::Derivative; use parking_lot::RwLock; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; @@ -7,17 +10,32 @@ use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error as StoreError, StoreItem}; use types::*; +type PersistedSyncContributions<T> = Vec<(SyncAggregateId, Vec<SyncCommitteeContribution<T>>)>; + /// SSZ-serializable version of `OperationPool`. /// /// Operations are stored in arbitrary order, so it's not a good idea to compare instances /// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first. -#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)] +#[superstruct( + variants(Base, Altair), + variant_attributes( + derive(Derivative, PartialEq, Debug, Serialize, Deserialize, Encode, Decode), + serde(bound = "T: EthSpec", deny_unknown_fields), + derivative(Clone), + ), + partial_getter_error(ty = "OpPoolError", expr = "OpPoolError::IncorrectOpPoolVariant") +)] +#[derive(PartialEq, Debug, Serialize, Deserialize, Encode)] +#[serde(untagged)] #[serde(bound = "T: EthSpec")] pub struct PersistedOperationPool<T: EthSpec> { /// Mapping from attestation ID to attestation mappings. // We could save space by not storing the attestation ID, but it might // be difficult to make that roundtrip due to eager aggregation. attestations: Vec<(AttestationId, Vec<Attestation<T>>)>, /// Mapping from sync contribution ID to sync contributions and aggregate.
+ #[superstruct(only(Altair))] + sync_contributions: PersistedSyncContributions<T>, /// Attester slashings. attester_slashings: Vec<(AttesterSlashing<T>, ForkVersion)>, /// Proposer slashings. @@ -27,7 +45,9 @@ pub struct PersistedOperationPool<T: EthSpec> { } impl<T: EthSpec> PersistedOperationPool<T> { - /// Convert an `OperationPool` into serializable form. + /// Convert an `OperationPool` into serializable form. Always converts to + /// `PersistedOperationPool::Altair` because the v3 to v4 database schema migration ensures + /// the op pool is always persisted as the Altair variant. pub fn from_operation_pool(operation_pool: &OperationPool<T>) -> Self { let attestations = operation_pool .attestations @@ -36,6 +56,13 @@ impl<T: EthSpec> PersistedOperationPool<T> { .map(|(att_id, att)| (att_id.clone(), att.clone())) .collect(); + let sync_contributions = operation_pool + .sync_contributions + .read() + .iter() + .map(|(id, contribution)| (id.clone(), contribution.clone())) + .collect(); + let attester_slashings = operation_pool .attester_slashings .read() @@ -57,42 +84,82 @@ impl<T: EthSpec> PersistedOperationPool<T> { .map(|(_, exit)| exit.clone()) .collect(); - Self { + PersistedOperationPool::Altair(PersistedOperationPoolAltair { attestations, + sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - } + }) } - /// Reconstruct an `OperationPool`. - pub fn into_operation_pool(self) -> OperationPool<T> { - let attestations = RwLock::new(self.attestations.into_iter().collect()); - let attester_slashings = RwLock::new(self.attester_slashings.into_iter().collect()); + /// Reconstruct an `OperationPool`. Sets `sync_contributions` to its `Default` if `self` matches + /// `PersistedOperationPool::Base`. + pub fn into_operation_pool(self) -> Result<OperationPool<T>, OpPoolError> { + let attestations = RwLock::new(self.attestations().to_vec().into_iter().collect()); + let attester_slashings = + RwLock::new(self.attester_slashings().to_vec().into_iter().collect()); let proposer_slashings = RwLock::new( - self.proposer_slashings + self.proposer_slashings() + .to_vec() .into_iter() .map(|slashing| (slashing.signed_header_1.message.proposer_index, slashing)) .collect(), ); let voluntary_exits = RwLock::new( - self.voluntary_exits + self.voluntary_exits() + .to_vec() .into_iter() .map(|exit| (exit.message.validator_index, exit)) .collect(), ); + let op_pool = match self { + PersistedOperationPool::Base(_) => OperationPool { + attestations, + sync_contributions: <_>::default(), + attester_slashings, + proposer_slashings, + voluntary_exits, + _phantom: Default::default(), + }, + PersistedOperationPool::Altair(_) => { + let sync_contributions = + RwLock::new(self.sync_contributions()?.to_vec().into_iter().collect()); - OperationPool { - attestations, - attester_slashings, - proposer_slashings, - voluntary_exits, - _phantom: Default::default(), + OperationPool { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + _phantom: Default::default(), + } + } + }; + Ok(op_pool) + } + + /// Convert the `PersistedOperationPool::Base` variant to `PersistedOperationPool::Altair` by + /// setting `sync_contributions` to its default.
+ pub fn base_to_altair(self) -> Self { + match self { + PersistedOperationPool::Base(_) => { + PersistedOperationPool::Altair(PersistedOperationPoolAltair { + attestations: self.attestations().to_vec(), + sync_contributions: <_>::default(), + attester_slashings: self.attester_slashings().to_vec(), + proposer_slashings: self.proposer_slashings().to_vec(), + voluntary_exits: self.voluntary_exits().to_vec(), + }) + } + PersistedOperationPool::Altair(_) => self, + } + } } -impl<T: EthSpec> StoreItem for PersistedOperationPool<T> { +/// This `StoreItem` implementation is necessary for migrating the `PersistedOperationPool` +/// in the v3 to v4 database schema migration. +impl<T: EthSpec> StoreItem for PersistedOperationPoolBase<T> { fn db_column() -> DBColumn { DBColumn::OpPool } @@ -105,3 +172,23 @@ impl<T: EthSpec> StoreItem for PersistedOperationPool<T> { Self::from_ssz_bytes(bytes).map_err(Into::into) } } + +/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::Altair` +/// because the v3 to v4 database schema migration ensures the persisted op pool is always stored +/// in the Altair format. +impl<T: EthSpec> StoreItem for PersistedOperationPool<T> { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec<u8> { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> { + // Default deserialization to the Altair variant. + PersistedOperationPoolAltair::from_ssz_bytes(bytes) + .map(Self::Altair) + .map_err(Into::into) + } +} diff --git a/beacon_node/operation_pool/src/sync_aggregate_id.rs b/beacon_node/operation_pool/src/sync_aggregate_id.rs new file mode 100644 index 00000000000..401e0c5f82f --- /dev/null +++ b/beacon_node/operation_pool/src/sync_aggregate_id.rs @@ -0,0 +1,21 @@ +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use types::{Hash256, Slot}; + +/// Used to key `SyncAggregate`s in the `naive_sync_aggregation_pool`. +#[derive( + PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize, +)] +pub struct SyncAggregateId { + pub slot: Slot, + pub beacon_block_root: Hash256, +} + +impl SyncAggregateId { + pub fn new(slot: Slot, beacon_block_root: Hash256) -> Self { + Self { + slot, + beacon_block_root, + } + } +} diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6677e6a090f..40b9ced5841 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -264,7 +264,7 @@ pub fn get_config<E: EthSpec>( /* * Load the eth2 network dir to obtain some additional config values.
*/ - let eth2_network_config = get_eth2_network_config(&cli_args)?; + let eth2_network_config = get_eth2_network_config(cli_args)?; client_config.eth1.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); client_config.eth1.deposit_contract_deploy_block = diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 38234273756..793194d26f9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -305,7 +305,7 @@ impl, Cold: ItemStore> HotColdDB pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { let mut ops: Vec = Vec::new(); if state.slot() < self.get_split_slot() { - self.store_cold_state(state_root, &state, &mut ops)?; + self.store_cold_state(state_root, state, &mut ops)?; self.cold_db.do_atomically(ops) } else { self.store_hot_state(state_root, state, &mut ops)?; @@ -563,7 +563,7 @@ impl, Cold: ItemStore> HotColdDB "slot" => state.slot().as_u64(), "state_root" => format!("{:?}", state_root) ); - store_full_state(state_root, &state, ops)?; + store_full_state(state_root, state, ops)?; } // Store a summary of the state. @@ -861,7 +861,7 @@ impl, Cold: ItemStore> HotColdDB per_block_processing( &mut state, - &block, + block, None, BlockSignatureStrategy::NoVerification, &self.spec, diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 045b87eba15..2190806cd1d 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -271,7 +271,7 @@ mod tests { fn simplediskdb() { let dir = tempdir().unwrap(); let path = dir.path(); - let store = LevelDB::open(&path).unwrap(); + let store = LevelDB::open(path).unwrap(); test_impl(store); } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 45d159c0849..b9066240462 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -2,7 +2,7 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; use types::{Checkpoint, Hash256}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(3); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(4); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index a6cd02bbc37..cf3863c9344 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -6,6 +6,7 @@ use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp}; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::convert::TryInto; +use std::sync::Arc; use types::superstruct; use types::*; @@ -85,9 +86,9 @@ where // Light-client sync committees #[superstruct(only(Altair))] - pub current_sync_committee: SyncCommittee, + pub current_sync_committee: Arc>, #[superstruct(only(Altair))] - pub next_sync_committee: SyncCommittee, + pub next_sync_committee: Arc>, } /// Implement the conversion function from BeaconState -> PartialBeaconState. 
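The `base_to_altair` helper and the split `StoreItem` implementations above are the machinery behind this schema version bump. A minimal sketch of how the v3 to v4 migration step could use them (the key constant, store handle, and error plumbing here are assumptions for illustration, not code from this patch):

```rust
// Hypothetical v3 -> v4 op-pool upgrade step: decode the legacy `Base` encoding via
// its dedicated `StoreItem` impl, convert it, and persist the `Altair` encoding.
fn upgrade_op_pool<T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>>(
    db: &HotColdDB<T, Hot, Cold>,
    op_pool_key: Hash256, // assumed key under which the op pool is stored
) -> Result<(), StoreError> {
    if let Some(base) = db.get_item::<PersistedOperationPoolBase<T>>(&op_pool_key)? {
        let altair = PersistedOperationPool::Base(base).base_to_altair();
        db.put_item(&op_pool_key, &altair)?;
    }
    Ok(())
}
```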
diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index d45a214b388..9da65f9dbfc 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" beacon_chain = { path = "../beacon_chain" } types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } slog = "2.5.2" parking_lot = "0.11.0" futures = "0.3.7" diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 5492b22fa9e..2128d94f271 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -3,8 +3,6 @@ Lighthouse implements the standard [Eth2 Beacon Node API specification][OpenAPI]. Please follow that link for a full description of each API endpoint. -> **Warning:** the standard API specification is still in flux and the Lighthouse implementation is partially incomplete. You can track the status of each endpoint at [#1434](https://github.com/sigp/lighthouse/issues/1434). - ## Starting the server A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag. The default listen address is `127.0.0.1:5052`. The following CLI flags control the HTTP server: - `--http`: enable the HTTP server (required even if the following flags are provided). - `--http-port`: specify the listen port of the server. -- `--http-address`: specify the listen address of the server. +- `--http-address`: specify the listen address of the server. It is _not_ recommended to listen + on `0.0.0.0`; please see [Security](#security) below. - `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` header. The default is to not supply a header. @@ -22,7 +21,34 @@ The schema of the API aligns with the standard Eth2 Beacon Node API as defined at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs). An interactive specification is available [here][OpenAPI]. -### CLI Example +## Security + +**Do not** expose the beacon node API to the public internet or you will open your node to +denial-of-service (DoS) attacks. + +The API includes several endpoints which can be used to trigger heavy processing, and as +such it is strongly recommended to restrict how it is accessed. Using `--http-address` to change +the listening address from `localhost` should only be done with extreme care. + +To safely provide access to the API from a different machine you should use one of the following +standard techniques: + +* Use an [SSH tunnel][ssh_tunnel], i.e. access `localhost` remotely; a worked example is shown + below. This is recommended, and doesn't require setting `--http-address`. +* Use a firewall to limit access to certain remote IPs, e.g. allow access only from one other + machine on the local network. +* Shield Lighthouse behind an HTTP server with rate-limiting such as NGINX. This is only + recommended for advanced users, e.g. beacon node hosting providers. + +Additional risks to be aware of include: + +* The `node/identity` and `node/peers` endpoints expose information about your node's peer-to-peer + identity. +* The `--http-allow-origin` flag changes the server's CORS policy, allowing cross-site requests + from browsers. You should only supply it if you understand the risks, e.g. malicious websites + accessing your beacon node if you use the same machine for staking and web browsing.
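As a concrete illustration of the SSH tunnel approach (an editorial sketch; `user` and `bn-host` are placeholders for your own username and remote host):

```bash
ssh -N -L 5052:127.0.0.1:5052 user@bn-host
```

While the tunnel is open, requests to `http://localhost:5052` on the local machine are forwarded to the beacon node's API over the encrypted SSH connection.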
+ +## CLI Example Start the beacon node with the HTTP server listening on [http://localhost:5052](http://localhost:5052): @@ -128,3 +154,4 @@ lighthouse bn --http --http-allow-origin "*" > Only use it in production if you understand the risks of a loose CORS policy. [OpenAPI]: https://ethereum.github.io/eth2.0-APIs/#/ +[ssh_tunnel]: https://www.ssh.com/academy/ssh/tunneling/example diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index dab55fb4701..bc19d18281e 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -329,3 +329,28 @@ curl -X GET "http://localhost:5052/lighthouse/beacon/states/0/ssz" | jq ``` *Example omitted for brevity, the body simply contains SSZ bytes.* + +### `/lighthouse/liveness` + +POST request that checks if any of the given validators have attested in the given epoch. Returns a list +of objects, each including the validator index, epoch, and `is_live` status of a requested validator. + +This endpoint is used in doppelganger detection, and will only provide accurate information for the +current, previous, or next epoch. + + +```bash +curl -X POST "http://localhost:5052/lighthouse/liveness" -d '{"indices":["0","1"],"epoch":"1"}' -H "content-type: application/json" | jq +``` + +```json +{ + "data": [ + { + "index": "0", + "epoch": "1", + "is_live": true + } + ] +} +``` \ No newline at end of file diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 033f5d76570..58ad76ce5ab 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -351,7 +351,7 @@ Typical Responses | 200 "checksum": { "function": "sha256", "params": { - + }, "message": "abadc1285fd38b24a98ac586bda5b17a8f93fc1ff0778803dc32049578981236" }, diff --git a/book/src/api-vc.md b/book/src/api-vc.md index 0a8941edaf7..6ee79d4f720 100644 --- a/book/src/api-vc.md +++ b/book/src/api-vc.md @@ -17,18 +17,21 @@ The following CLI flags control the HTTP server: - `--http`: enable the HTTP server (required even if the following flags are provided). +- `--http-address`: specify the listen address of the server. It is almost always unsafe to use a non-default HTTP listen address. Use with caution. See the **Security** section below for more information. - `--http-port`: specify the listen port of the server. - `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` header. The default is to not supply a header. ## Security -The validator client HTTP is **not encrypted** (i.e., it is **not HTTPS**). For -this reason, it will only listen on `127.0.0.1`. +The validator client HTTP server is **not encrypted** (i.e., it is **not HTTPS**). For +this reason, it will listen by default on `127.0.0.1`. It is unsafe to expose the validator client to the public Internet without additional transport layer security (e.g., HTTPS via nginx, SSH tunnels, etc.). +For custom setups, such as certain Docker configurations, a custom HTTP listen address can be used by passing the `--http-address` and `--unencrypted-http-transport` flags. The `--unencrypted-http-transport` flag is a safety flag which is required to ensure the user is aware of the potential risks when using a non-default listen address. 
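For instance, a custom Docker-style setup might combine the flags described above (an editorial sketch; the listen address is illustrative only):

```bash
lighthouse vc --http --http-address 0.0.0.0 --unencrypted-http-transport
```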
+ ### CLI Example Start the validator client with the HTTP server listening on [http://localhost:5062](http://localhost:5062): diff --git a/book/src/faq.md b/book/src/faq.md index d020ac218e5..edd580a5316 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -161,7 +161,7 @@ See [here.](./installation-source.md#troubleshooting) ### What is "Syncing eth1 block cache" ``` -Nov 30 21:04:28.268 WARN Syncing eth1 block cache est_blocks_remaining: initializing deposits, msg: sync can take longer when using remote eth1 nodes, service: slot_notifier +Nov 30 21:04:28.268 WARN Syncing eth1 block cache est_blocks_remaining: initializing deposits, service: slot_notifier ``` This log indicates that your beacon node is downloading blocks and deposits diff --git a/book/src/redundancy.md b/book/src/redundancy.md index 2c07c90bb2a..989d3c11614 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -61,7 +61,7 @@ following flags: `5052`). This is only required if your backup node is on a different host. - `--subscribe-all-subnets`: ensures that the beacon node subscribes to *all* subnets, not just on-demand requests from validators. -- `--process-all-attestations`: ensures that the beacon node performs +- `--import-all-attestations`: ensures that the beacon node performs aggregation on all seen attestations. Subsequently, one could use the following command to provide a backup beacon @@ -72,12 +72,12 @@ lighthouse bn \ --staking \ --http-address 0.0.0.0 \ --subscribe-all-subnets \ - --process-all-attestations + --import-all-attestations ``` ### Resource usage of redundant Beacon Nodes -The `--subscribe-all-subnets` and `--process-all-attestations` flags typically +The `--subscribe-all-subnets` and `--import-all-attestations` flags typically cause a significant increase in resource consumption. A doubling in CPU utilization and RAM consumption is expected. diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index ce8e61cafee..72e2e379c72 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -52,14 +52,9 @@ The following fields are returned: - `current_epoch_active_gwei`: the total staked gwei that was active (i.e., able to vote) during the current epoch. -- `current_epoch_attesting_gwei`: the total staked gwei that had one or more - attestations included in a block during the current epoch (multiple - attestations by the same validator do not increase this figure). - `current_epoch_target_attesting_gwei`: the total staked gwei that attested to - the majority-elected Casper FFG target epoch during the current epoch. This - figure must be equal to or less than `current_epoch_attesting_gwei`. -- `previous_epoch_active_gwei`: as above, but during the previous epoch. -- `previous_epoch_attesting_gwei`: see `current_epoch_attesting_gwei`. + the majority-elected Casper FFG target epoch during the current epoch. +- `previous_epoch_active_gwei`: as per `current_epoch_active_gwei`, but during the previous epoch. - `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`. - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. 
@@ -91,9 +86,7 @@ curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/global" -H "data": { "current_epoch_active_gwei": 642688000000000, "previous_epoch_active_gwei": 642688000000000, - "current_epoch_attesting_gwei": 366208000000000, "current_epoch_target_attesting_gwei": 366208000000000, - "previous_epoch_attesting_gwei": 1000000000, "previous_epoch_target_attesting_gwei": 1000000000, "previous_epoch_head_attesting_gwei": 1000000000 } @@ -121,12 +114,10 @@ curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/42" -H "acc "data": { "is_slashed": false, "is_withdrawable_in_current_epoch": false, - "is_active_in_current_epoch": true, - "is_active_in_previous_epoch": true, + "is_active_unslashed_in_current_epoch": true, + "is_active_unslashed_in_previous_epoch": true, "current_epoch_effective_balance_gwei": 32000000000, - "is_current_epoch_attester": false, "is_current_epoch_target_attester": false, - "is_previous_epoch_attester": false, "is_previous_epoch_target_attester": false, "is_previous_epoch_head_attester": false } diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 897fa2f3bc0..bb4dff5f8c6 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -13,7 +13,7 @@ eth2_network_config = { path = "../common/eth2_network_config" } eth2_ssz = "0.1.2" slog = "2.5.2" sloggers = "1.0.1" -tokio = "1.1.0" +tokio = "1.7.1" log = "0.4.11" slog-term = "2.6.0" logging = { path = "../common/logging" } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 54f450a54ee..f5869a7e8a0 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -29,7 +29,7 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { let data_dir = get_data_dir(matches); // Try and grab network config from input CLI params - let eth2_network_config = get_eth2_network_config(&matches)?; + let eth2_network_config = get_eth2_network_config(matches)?; // Try and obtain bootnodes diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index b1efd805ce7..ed563504b0b 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -89,6 +89,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { // Ignore these events here } Discv5Event::EnrAdded { .. } => {} // Ignore + Discv5Event::TalkRequest(_) => {} // Ignore Discv5Event::NodeInserted { .. } => {} // Ignore Discv5Event::SocketUpdated(socket_addr) => { info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr)); diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 0ac8c8151c6..987c2aa27fe 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -73,14 +73,14 @@ pub fn write_file_via_temporary( // If the file already exists, preserve its permissions by copying it. // Otherwise, create a new file with restricted permissions. if file_path.exists() { - fs::copy(&file_path, &temp_path).map_err(FsError::UnableToCopyFile)?; - fs::write(&temp_path, &bytes).map_err(FsError::UnableToWriteFile)?; + fs::copy(file_path, temp_path).map_err(FsError::UnableToCopyFile)?; + fs::write(temp_path, bytes).map_err(FsError::UnableToWriteFile)?; } else { - create_with_600_perms(&temp_path, &bytes)?; + create_with_600_perms(temp_path, bytes)?; } // With the temporary file created, perform an atomic rename. 
- fs::rename(&temp_path, &file_path).map_err(FsError::UnableToRenameFile)?; + fs::rename(temp_path, file_path).map_err(FsError::UnableToRenameFile)?; Ok(()) } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 47aaff2155e..2aba4ff2e9c 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -405,7 +405,7 @@ mod tests { voting_keystore_path: "" voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" "#; - let def: ValidatorDefinition = serde_yaml::from_str(&no_graffiti).unwrap(); + let def: ValidatorDefinition = serde_yaml::from_str(no_graffiti).unwrap(); assert!(def.graffiti.is_none()); let invalid_graffiti = r#"--- @@ -417,7 +417,7 @@ mod tests { voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" "#; - let def: Result<ValidatorDefinition, _> = serde_yaml::from_str(&invalid_graffiti); + let def: Result<ValidatorDefinition, _> = serde_yaml::from_str(invalid_graffiti); assert!(def.is_err()); let valid_graffiti = r#"--- @@ -429,7 +429,7 @@ mod tests { voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" "#; - let def: ValidatorDefinition = serde_yaml::from_str(&valid_graffiti).unwrap(); + let def: ValidatorDefinition = serde_yaml::from_str(valid_graffiti).unwrap(); assert_eq!( def.graffiti, Some(GraffitiString::from_str("mrfwashere").unwrap()) diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index e6b69c9686e..2e684a6d7ae 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -17,7 +17,7 @@ proto_array = { path = "../../consensus/proto_array", optional = true } serde_utils = { path = "../../consensus/serde_utils" } zeroize = { version = "1.1.1", features = ["zeroize_derive"] } eth2_keystore = { path = "../../crypto/eth2_keystore" } -libsecp256k1 = "0.3.5" +libsecp256k1 = "0.5.0" ring = "0.16.19" bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index ea4f013a1e2..08b4bcaead8 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -83,6 +83,7 @@ impl fmt::Display for Error { pub struct Timeouts { pub attestation: Duration, pub attester_duties: Duration, + pub liveness: Duration, pub proposal: Duration, pub proposer_duties: Duration, } @@ -92,6 +93,7 @@ impl Timeouts { Timeouts { attestation: timeout, attester_duties: timeout, + liveness: timeout, proposal: timeout, proposer_duties: timeout, } } @@ -1103,6 +1105,30 @@ impl BeaconNodeHttpClient { .await } + /// `POST lighthouse/liveness` + pub async fn post_lighthouse_liveness( + &self, + ids: &[u64], + epoch: Epoch, + ) -> Result<GenericResponse<Vec<LivenessResponseData>>, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+ .push("lighthouse") + .push("liveness"); + + self.post_with_timeout_and_response( + path, + &LivenessRequestData { + indices: ids.to_vec(), + epoch, + }, + self.timeouts.liveness, + ) + .await + } + /// `POST validator/duties/attester/{epoch}` pub async fn post_validator_duties_attester( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 716ac41e523..70c5fa2b32e 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -32,13 +32,9 @@ pub struct GlobalValidatorInclusionData { pub current_epoch_active_gwei: u64, /// The total effective balance of all active validators during the _previous_ epoch. pub previous_epoch_active_gwei: u64, - /// The total effective balance of all validators who attested during the _current_ epoch. - pub current_epoch_attesting_gwei: u64, /// The total effective balance of all validators who attested during the _current_ epoch and /// agreed with the state about the beacon block at the first slot of the _current_ epoch. pub current_epoch_target_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch. - pub previous_epoch_attesting_gwei: u64, /// The total effective balance of all validators who attested during the _previous_ epoch and /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. pub previous_epoch_target_attesting_gwei: u64, @@ -53,19 +49,15 @@ pub struct ValidatorInclusionData { pub is_slashed: bool, /// True if the validator can withdraw in the current epoch. pub is_withdrawable_in_current_epoch: bool, - /// True if the validator was active in the state's _current_ epoch. - pub is_active_in_current_epoch: bool, - /// True if the validator was active in the state's _previous_ epoch. - pub is_active_in_previous_epoch: bool, + /// True if the validator was active and not slashed in the state's _current_ epoch. + pub is_active_unslashed_in_current_epoch: bool, + /// True if the validator was active and not slashed in the state's _previous_ epoch. + pub is_active_unslashed_in_previous_epoch: bool, /// The validator's effective balance in the _current_ epoch. pub current_epoch_effective_balance_gwei: u64, - /// True if the validator had an attestation included in the _current_ epoch. - pub is_current_epoch_attester: bool, /// True if the validator's beacon block root attestation for the first slot of the _current_ /// epoch matches the block root known to the state. pub is_current_epoch_target_attester: bool, - /// True if the validator had an attestation included in the _previous_ epoch. - pub is_previous_epoch_attester: bool, /// True if the validator's beacon block root attestation for the first slot of the _previous_ /// epoch matches the block root known to the state. 
pub is_previous_epoch_target_attester: bool, diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index c6a12350987..ae1a28a8ba5 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -2,12 +2,12 @@ use super::{types::*, PK_LEN, SECRET_PREFIX}; use crate::Error; use account_utils::ZeroizeString; use bytes::Bytes; +use libsecp256k1::{Message, PublicKey, Signature}; use reqwest::{ header::{HeaderMap, HeaderValue}, IntoUrl, }; use ring::digest::{digest, SHA256}; -use secp256k1::{Message, PublicKey, Signature}; use sensitive_url::SensitiveUrl; use serde::{de::DeserializeOwned, Serialize}; @@ -35,7 +35,7 @@ pub fn parse_pubkey(secret: &str) -> Result<PublicKey, Error> { &secret[SECRET_PREFIX.len()..] }; - serde_utils::hex::decode(&secret) + serde_utils::hex::decode(secret) .map_err(|e| Error::InvalidSecret(format!("invalid hex: {:?}", e))) .and_then(|bytes| { if bytes.len() != PK_LEN { @@ -94,7 +94,7 @@ impl ValidatorClientHttpClient { .ok() .and_then(|bytes| { let sig = Signature::parse_der(&bytes).ok()?; - Some(secp256k1::verify(&message, &sig, &self.server_pubkey)) + Some(libsecp256k1::verify(&message, &sig, &self.server_pubkey)) }) .filter(|is_valid| *is_valid) .ok_or(Error::InvalidSignatureHeader)?; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index d31407645d7..264f6c58702 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -847,6 +847,21 @@ impl FromStr for Accept { } } +#[derive(Debug, Serialize, Deserialize)] +pub struct LivenessRequestData { + pub epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64_vec")] + pub indices: Vec<u64>, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize)] +pub struct LivenessResponseData { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub epoch: Epoch, + pub is_live: bool, +} + #[cfg(test)] mod tests { use super::*; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index b11f93672ef..bab813d19e9 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -19,4 +19,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} eth2_ssz = "0.1.2" eth2_config = { path = "../eth2_config"} -enr = { version = "0.5.0", features = ["ed25519", "k256"] } +enr = { version = "0.5.1", features = ["ed25519", "k256"] } diff --git a/common/eth2_wallet_manager/src/filesystem.rs b/common/eth2_wallet_manager/src/filesystem.rs index 8a7a19e362e..7c43199751c 100644 --- a/common/eth2_wallet_manager/src/filesystem.rs +++ b/common/eth2_wallet_manager/src/filesystem.rs @@ -16,8 +16,8 @@ pub enum Error { UnableToRemoveWallet(io::Error), UnableToCreateWallet(io::Error), UnableToReadWallet(io::Error), - JsonWriteError(WalletError), - JsonReadError(WalletError), + JsonWrite(WalletError), + JsonRead(WalletError), } /// Read a wallet with the given `uuid` from the `wallet_dir`.
@@ -32,7 +32,7 @@ pub fn read<P: AsRef<Path>>(wallet_dir: P, uuid: &Uuid) -> Result<Wallet, Error> .create(false) .open(json_path) .map_err(Error::UnableToReadWallet) - .and_then(|f| Wallet::from_json_reader(f).map_err(Error::JsonReadError)) + .and_then(|f| Wallet::from_json_reader(f).map_err(Error::JsonRead)) } } @@ -84,7 +84,7 @@ pub fn create<P: AsRef<Path>>(wallet_dir: P, wallet: &Wallet) -> Result<(), Error> .create_new(true) .open(json_path) .map_err(Error::UnableToCreateWallet) - .and_then(|f| wallet.to_json_writer(f).map_err(Error::JsonWriteError)) + .and_then(|f| wallet.to_json_writer(f).map_err(Error::JsonWrite)) } } diff --git a/common/hashset_delay/Cargo.toml b/common/hashset_delay/Cargo.toml index 80e5e9e2b6e..d07023ee15a 100644 --- a/common/hashset_delay/Cargo.toml +++ b/common/hashset_delay/Cargo.toml @@ -9,4 +9,4 @@ futures = "0.3.7" tokio-util = { version = "0.6.2", features = ["time"] } [dev-dependencies] -tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] } +tokio = { version = "1.7.1", features = ["time", "rt-multi-thread", "macros"] } diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 0695cf07ac4..ba499bc5477 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -54,14 +54,14 @@ //! } //! ``` -use prometheus::{HistogramOpts, HistogramTimer, Opts}; +use prometheus::{HistogramOpts, Opts}; use std::time::Duration; use prometheus::core::{Atomic, GenericGauge, GenericGaugeVec}; pub use prometheus::{ proto::{Metric, MetricFamily, MetricType}, - Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, - IntGaugeVec, Result, TextEncoder, + Encoder, Gauge, GaugeVec, Histogram, HistogramTimer, HistogramVec, IntCounter, IntCounterVec, + IntGauge, IntGaugeVec, Result, TextEncoder, }; /// Collect all the metrics for reporting.
diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 35ef0a1ea79..f4bcd088d8b 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -140,9 +140,7 @@ impl<'a> slog_term::RecordDecorator for AlignedRecordDecorator<'a> { write!( self, "{}", - std::iter::repeat(' ') - .take(self.message_width - self.message_count) - .collect::<String>() + " ".repeat(self.message_width - self.message_count) )?; self.message_active = false; self.message_count = 0; diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 7ecaec20111..b95f33e8f50 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -10,7 +10,7 @@ edition = "2018" reqwest = { version = "0.11.0", features = ["json","stream"] } futures = "0.3.7" task_executor = { path = "../task_executor" } -tokio = "1.1.0" +tokio = "1.7.1" eth2 = {path = "../eth2"} serde_json = "1.0.58" serde = "1.0.116" diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index b761e5544b3..16965f43cdf 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -153,7 +153,7 @@ pub fn gather_metrics(metrics_map: &HashMap) -> Option"] edition = "2018" [dependencies] -tokio = { version = "1.1.0", features = ["rt"] } +tokio = { version = "1.7.1", features = ["rt"] } slog = "2.5.2" futures = "0.3.7" exit-future = "0.2.0" diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index bfdf35ff331..df54795f347 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -180,7 +180,7 @@ impl<'a> Builder<'a> { signature: Signature::empty().into(), }; - deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, &spec); + deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, spec); let deposit_data = encode_eth1_tx_data(&deposit_data).map_err(Error::UnableToEncodeDeposit)?; diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 7b2ab637398..faf27906c6a 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -14,7 +14,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } state_processing = { path = "../../consensus/state_processing" } safe_arith = { path = "../../consensus/safe_arith" } serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "1.1.0", features = ["sync"] } +tokio = { version = "1.7.1", features = ["sync"] } headers = "0.3.2" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" diff --git a/consensus/cached_tree_hash/src/impls.rs b/consensus/cached_tree_hash/src/impls.rs index 0e6bf61420e..0624bd20145 100644 --- a/consensus/cached_tree_hash/src/impls.rs +++ b/consensus/cached_tree_hash/src/impls.rs @@ -60,7 +60,7 @@ impl<N: Unsigned> CachedTreeHash<TreeHashCache> for FixedVector<Hash256, N> { arena: &mut CacheArena, cache: &mut TreeHashCache, ) -> Result<Hash256, Error> { - cache.recalculate_merkle_root(arena, hash256_iter(&self)) + cache.recalculate_merkle_root(arena, hash256_iter(self)) } } @@ -79,7 +79,7 @@ impl<N: Unsigned> CachedTreeHash<TreeHashCache> for FixedVector<u64, N> { arena: &mut CacheArena, cache: &mut TreeHashCache, ) -> Result<Hash256, Error> { - cache.recalculate_merkle_root(arena, u64_iter(&self)) + cache.recalculate_merkle_root(arena, u64_iter(self)) } } @@ -98,7 +98,7 @@ impl<N: Unsigned> CachedTreeHash<TreeHashCache> for VariableList<Hash256, N> { cache: &mut TreeHashCache, ) -> Result<Hash256, Error> { Ok(mix_in_length( - &cache.recalculate_merkle_root(arena, hash256_iter(&self))?, + &cache.recalculate_merkle_root(arena, hash256_iter(self))?, self.len(),
)) } @@ -120,7 +120,7 @@ impl<N: Unsigned> CachedTreeHash<TreeHashCache> for VariableList<u64, N> { cache: &mut TreeHashCache, ) -> Result<Hash256, Error> { Ok(mix_in_length( - &cache.recalculate_merkle_root(arena, u64_iter(&self))?, + &cache.recalculate_merkle_root(arena, u64_iter(self))?, self.len(), )) } diff --git a/consensus/cached_tree_hash/src/lib.rs b/consensus/cached_tree_hash/src/lib.rs index d60c920c3ee..af333f26700 100644 --- a/consensus/cached_tree_hash/src/lib.rs +++ b/consensus/cached_tree_hash/src/lib.rs @@ -11,7 +11,6 @@ pub type CacheArena = cache_arena::CacheArena<Hash256>; pub use crate::cache::TreeHashCache; pub use crate::impls::int_log; use ethereum_types::H256 as Hash256; -use tree_hash::TreeHash; #[derive(Debug, PartialEq, Clone)] pub enum Error { @@ -34,7 +33,7 @@ impl From<cache_arena::Error> for Error { } /// Trait for types which can make use of a cache to accelerate calculation of their tree hash root. -pub trait CachedTreeHash<Cache>: TreeHash { +pub trait CachedTreeHash<Cache> { /// Create a new cache appropriate for use with values of this type. fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> Cache; diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 76acb3c042e..84f27bdb86c 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -270,11 +270,11 @@ mod tests { return TestResult::discard(); } - let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); + let leaves_iter = int_leaves.into_iter().map(H256::from_low_u64_be); let mut merkle_tree = MerkleTree::create(&[], depth); - let proofs_ok = leaves.into_iter().enumerate().all(|(i, leaf)| { + let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| { assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(())); let (stored_leaf, branch) = merkle_tree.generate_proof(i, depth); stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash()) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index c0d8500bd8a..1e8be3a2cd0 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -210,7 +210,7 @@ impl ProtoArray { .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; // Perform a sanity check that the node is indeed valid to be the head. - if !self.node_is_viable_for_head(&best_node) { + if !self.node_is_viable_for_head(best_node) { return Err(Error::InvalidBestNode { start_root: *justified_root, justified_epoch: self.justified_epoch, @@ -321,7 +321,7 @@ impl ProtoArray { .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - let child_leads_to_viable_head = self.node_leads_to_viable_head(&child)?; + let child_leads_to_viable_head = self.node_leads_to_viable_head(child)?; // These three variables are aliases to the three options that we may set the // `parent.best_child` and `parent.best_descendant` to. @@ -334,54 +334,54 @@ impl ProtoArray { ); let no_change = (parent.best_child, parent.best_descendant); - let (new_best_child, new_best_descendant) = - if let Some(best_child_index) = parent.best_child { - if best_child_index == child_index && !child_leads_to_viable_head { - // If the child is already the best-child of the parent but it's not viable for - // the head, remove it. - change_to_none - } else if best_child_index == child_index { - // If the child is the best-child already, set it again to ensure that the - // best-descendant of the parent is updated.
- change_to_child - } else { - let best_child = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidBestDescendant(best_child_index))?; + let (new_best_child, new_best_descendant) = if let Some(best_child_index) = + parent.best_child + { + if best_child_index == child_index && !child_leads_to_viable_head { + // If the child is already the best-child of the parent but it's not viable for + // the head, remove it. + change_to_none + } else if best_child_index == child_index { + // If the child is the best-child already, set it again to ensure that the + // best-descendant of the parent is updated. + change_to_child + } else { + let best_child = self + .nodes + .get(best_child_index) + .ok_or(Error::InvalidBestDescendant(best_child_index))?; - let best_child_leads_to_viable_head = - self.node_leads_to_viable_head(&best_child)?; + let best_child_leads_to_viable_head = self.node_leads_to_viable_head(best_child)?; - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - // The child leads to a viable head, but the current best-child doesn't. + if child_leads_to_viable_head && !best_child_leads_to_viable_head { + // The child leads to a viable head, but the current best-child doesn't. + change_to_child + } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { + // The best child leads to a viable head, but the child doesn't. + no_change + } else if child.weight == best_child.weight { + // Tie-breaker of equal weights by root. + if child.root >= best_child.root { change_to_child - } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - // The best child leads to a viable head, but the child doesn't. + } else { no_change - } else if child.weight == best_child.weight { - // Tie-breaker of equal weights by root. - if child.root >= best_child.root { - change_to_child - } else { - no_change - } + } + } else { + // Choose the winner by weight. + if child.weight >= best_child.weight { + change_to_child } else { - // Choose the winner by weight. - if child.weight >= best_child.weight { - change_to_child - } else { - no_change - } + no_change } } - } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. - change_to_child - } else { - // There is no current best-child but the child is not viable. - no_change - }; + } + } else if child_leads_to_viable_head { + // There is no current best-child and the child is viable. + change_to_child + } else { + // There is no current best-child but the child is not viable. + no_change + }; let parent = self .nodes diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 3e27867410b..36bdab2dbef 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -148,8 +148,8 @@ impl ProtoArrayForkChoice { let deltas = compute_deltas( &self.proto_array.indices, &mut self.votes, - &old_balances, - &new_balances, + old_balances, + new_balances, ) .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index f074cd34184..faf90952bb4 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -2,6 +2,7 @@ use super::*; use core::num::NonZeroUsize; use ethereum_types::{H256, U128, U256}; use smallvec::SmallVec; +use std::sync::Arc; macro_rules! 
impl_decodable_for_uint { ($type: ident, $bit_size: expr) => { @@ -271,6 +272,20 @@ impl<T: Decode> Decode for Option<T> { } } +impl<T: Decode> Decode for Arc<T> { + fn is_ssz_fixed_len() -> bool { + T::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + T::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { + T::from_ssz_bytes(bytes).map(Arc::new) + } +} + impl Decode for H256 { fn is_ssz_fixed_len() -> bool { true diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index 03b842144a8..217a81d2ec1 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -2,6 +2,7 @@ use super::*; use core::num::NonZeroUsize; use ethereum_types::{H256, U128, U256}; use smallvec::SmallVec; +use std::sync::Arc; macro_rules! impl_encodable_for_uint { ($type: ident, $bit_size: expr) => { @@ -231,6 +232,24 @@ impl<T: Encode> Encode for Option<T> { } } +impl<T: Encode> Encode for Arc<T> { + fn is_ssz_fixed_len() -> bool { + T::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + T::ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + self.as_ref().ssz_append(buf) + } + + fn ssz_bytes_len(&self) -> usize { + self.as_ref().ssz_bytes_len() + } +} + macro_rules! impl_for_vec { ($type: ty) => { impl Encode for $type { diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs index 16712fcb07e..bde6b214e59 100644 --- a/consensus/ssz/tests/tests.rs +++ b/consensus/ssz/tests/tests.rs @@ -9,7 +9,7 @@ mod round_trip { for item in items { let encoded = &item.as_ssz_bytes(); assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); + assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); } } diff --git a/consensus/ssz_derive/src/lib.rs b/consensus/ssz_derive/src/lib.rs index 86327d94fff..20c534d3ad0 100644 --- a/consensus/ssz_derive/src/lib.rs +++ b/consensus/ssz_derive/src/lib.rs @@ -17,7 +17,7 @@ fn get_serializable_named_field_idents(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> .fields .iter() .filter_map(|f| { - if should_skip_serializing(&f) { + if should_skip_serializing(f) { None } else { Some(match &f.ident { @@ -36,7 +36,7 @@ fn get_serializable_field_types(struct_data: &syn::DataStruct) -> Vec<&syn::Type> .fields .iter() .filter_map(|f| { - if should_skip_serializing(&f) { + if should_skip_serializing(f) { None } else { Some(&f.ty) diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index 89f1272a3d3..71a2401685b 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -267,6 +267,32 @@ impl<N: Unsigned + Clone> Bitfield<Fixed<N>> { pub fn from_bytes(bytes: Vec<u8>) -> Result<Self, Error> { Self::from_raw_bytes(bytes, Self::capacity()) } + + /// Compute the intersection of two fixed-length `Bitfield`s. + /// + /// Return a new fixed-length `Bitfield`. + pub fn intersection(&self, other: &Self) -> Self { + let mut result = Self::new(); + // Bitwise-and the bytes together, starting from the left of each vector. This takes care + // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't + // contain any set bits beyond its length. + for i in 0..result.bytes.len() { + result.bytes[i] = self.bytes[i] & other.bytes[i]; + } + result + } + + /// Compute the union of two fixed-length `Bitfield`s. + /// + /// Return a new fixed-length `Bitfield`.
+ pub fn union(&self, other: &Self) -> Self { + let mut result = Self::new(); + for i in 0..result.bytes.len() { + result.bytes[i] = + self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); + } + result + } } impl<N: Unsigned + Clone> Default for Bitfield<Fixed<N>> { @@ -700,6 +726,58 @@ mod bitvector { assert!(BitVector16::from_ssz_bytes(&[1, 0b0000_0000, 0b0000_0000]).is_err()); } + #[test] + fn intersection() { + let a = BitVector16::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); + let b = BitVector16::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); + let c = BitVector16::from_raw_bytes(vec![0b1000, 0b0001], 16).unwrap(); + + assert_eq!(a.intersection(&b), c); + assert_eq!(b.intersection(&a), c); + assert_eq!(a.intersection(&c), c); + assert_eq!(b.intersection(&c), c); + assert_eq!(a.intersection(&a), a); + assert_eq!(b.intersection(&b), b); + assert_eq!(c.intersection(&c), c); + } + + #[test] + fn intersection_diff_length() { + let a = BitVector16::from_bytes(vec![0b0010_1110, 0b0010_1011]).unwrap(); + let b = BitVector16::from_bytes(vec![0b0010_1101, 0b0000_0001]).unwrap(); + let c = BitVector16::from_bytes(vec![0b0010_1100, 0b0000_0001]).unwrap(); + + assert_eq!(a.len(), 16); + assert_eq!(b.len(), 16); + assert_eq!(c.len(), 16); + assert_eq!(a.intersection(&b), c); + assert_eq!(b.intersection(&a), c); + } + + #[test] + fn union() { + let a = BitVector16::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); + let b = BitVector16::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); + let c = BitVector16::from_raw_bytes(vec![0b1111, 0b1001], 16).unwrap(); + + assert_eq!(a.union(&b), c); + assert_eq!(b.union(&a), c); + assert_eq!(a.union(&a), a); + assert_eq!(b.union(&b), b); + assert_eq!(c.union(&c), c); + } + + #[test] + fn union_diff_length() { + let a = BitVector16::from_bytes(vec![0b0010_1011, 0b0010_1110]).unwrap(); + let b = BitVector16::from_bytes(vec![0b0000_0001, 0b0010_1101]).unwrap(); + let c = BitVector16::from_bytes(vec![0b0010_1011, 0b0010_1111]).unwrap(); + + assert_eq!(a.len(), c.len()); + assert_eq!(a.union(&b), c); + assert_eq!(b.union(&a), c); + } + #[test] fn ssz_round_trip() { assert_round_trip(BitVector0::new()); diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index a60102aa88c..242e03cd854 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -364,7 +364,7 @@ mod test { fn ssz_round_trip<T: Encode + Decode + std::fmt::Debug + PartialEq>(item: T) { let encoded = &item.as_ssz_bytes(); assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); + assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); } #[test] diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 6c58ac6901d..2e20d0c37cc 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -345,7 +345,7 @@ mod test { fn round_trip<T: Encode + Decode + std::fmt::Debug + PartialEq>(item: T) { let encoded = &item.as_ssz_bytes(); assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); + assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); } #[test] diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 63f8b448686..2d1c43b5cd3 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -29,11 +29,14 @@ eth2_hashing = "0.1.0" int_to_bytes = { path = "../int_to_bytes" } smallvec = "1.6.1" arbitrary = { version = "0.4.6", features = ["derive"], optional = true }
+lighthouse_metrics = { path = "../../common/lighthouse_metrics", optional = true } +lazy_static = { version = "1.4.0", optional = true } [features] -default = ["legacy-arith"] +default = ["legacy-arith", "metrics"] fake_crypto = ["bls/fake_crypto"] legacy-arith = ["types/legacy-arith"] +metrics = ["lighthouse_metrics", "lazy_static"] arbitrary-fuzz = [ "arbitrary", "types/arbitrary-fuzz", diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index bbc24534081..0628e9fbd29 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -34,7 +34,7 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>( .push_leaf(deposit.data.tree_hash_root()) .map_err(BlockProcessingError::MerkleTreeError)?; state.eth1_data_mut().deposit_root = deposit_tree.root(); - process_deposit(&mut state, &deposit, spec, true)?; + process_deposit(&mut state, deposit, spec, true)?; } process_activations(&mut state, spec)?; diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 91959cd866b..18fee2e2c3b 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -14,6 +14,7 @@ #[macro_use] mod macros; +mod metrics; pub mod common; pub mod genesis; diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs new file mode 100644 index 00000000000..ddfaae56403 --- /dev/null +++ b/consensus/state_processing/src/metrics.rs @@ -0,0 +1,26 @@ +#![cfg(feature = "metrics")] + +use lazy_static::lazy_static; +pub use lighthouse_metrics::*; + +lazy_static! { + /* + * Participation Metrics + */ + pub static ref PARTICIPATION_PREV_EPOCH_HEAD_ATTESTING_GWEI_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_participation_prev_epoch_head_attesting_gwei_total", + "Total effective balance (gwei) of validators who attested to the head in the previous epoch" + ); + pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_participation_prev_epoch_target_attesting_gwei_total", + "Total effective balance (gwei) of validators who attested to the target in the previous epoch" + ); + pub static ref PARTICIPATION_PREV_EPOCH_SOURCE_ATTESTING_GWEI_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_participation_prev_epoch_source_attesting_gwei_total", + "Total effective balance (gwei) of validators who attested to the source in the previous epoch" + ); + pub static ref PARTICIPATION_PREV_EPOCH_ACTIVE_GWEI_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_participation_prev_epoch_active_gwei_total", + "Total effective balance (gwei) of validators active in the previous epoch" + ); +} diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 41f85a88957..a38382d2680 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -2,6 +2,7 @@ use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid}; use rayon::prelude::*; use safe_arith::{ArithError, SafeArith}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; +use std::borrow::Cow; use tree_hash::TreeHash; use types::*; @@ -102,6 +103,7 @@ pub fn per_block_processing<T: EthSpec>( BlockSignatureVerifier::verify_entire_block( state, |i| get_pubkey_from_state(state, i), + |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned), signed_block, block_root, spec @@ -127,10 +129,16 @@
pub fn per_block_processing( process_randao(state, block, verify_signatures, spec)?; process_eth1_data(state, block.body().eth1_data())?; - process_operations(state, block.body(), verify_signatures, spec)?; + process_operations(state, block.body(), proposer_index, verify_signatures, spec)?; if let BeaconBlockRef::Altair(inner) = block { - process_sync_aggregate(state, &inner.body.sync_aggregate, proposer_index, spec)?; + process_sync_aggregate( + state, + &inner.body.sync_aggregate, + proposer_index, + verify_signatures, + spec, + )?; } Ok(()) diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 7c8714386c3..ac1e247e3fb 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -1,55 +1,44 @@ use crate::common::{altair::get_base_reward_per_increment, decrease_balance, increase_balance}; use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid}; +use crate::{signature_sets::sync_aggregate_signature_set, VerifySignatures}; use safe_arith::SafeArith; -use tree_hash::TreeHash; +use std::borrow::Cow; use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR}; -use types::{BeaconState, ChainSpec, Domain, EthSpec, SigningData, SyncAggregate, Unsigned}; +use types::{BeaconState, ChainSpec, EthSpec, PublicKeyBytes, SyncAggregate, Unsigned}; pub fn process_sync_aggregate( state: &mut BeaconState, aggregate: &SyncAggregate, proposer_index: u64, + verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - // Verify sync committee aggregate signature signing over the previous slot block root - let previous_slot = state.slot().saturating_sub(1u64); - let current_sync_committee = state.current_sync_committee()?.clone(); - let committee_pubkeys = ¤t_sync_committee.pubkeys; - let participant_pubkeys = committee_pubkeys - .iter() - .zip(aggregate.sync_committee_bits.iter()) - .flat_map(|(pubkey, bit)| { - if bit { - // FIXME(altair): accelerate pubkey decompression with a cache - Some(pubkey.decompress()) - } else { - None - } - }) - .collect::, _>>() - .map_err(|_| SyncAggregateInvalid::PubkeyInvalid)?; + // Verify sync committee aggregate signature signing over the previous slot block root + if verify_signatures.is_true() { + // This decompression could be avoided with a cache, but we're not likely + // to encounter this case in practice due to the use of pre-emptive signature + // verification (which uses the `ValidatorPubkeyCache`). + let decompressor = |pk_bytes: &PublicKeyBytes| pk_bytes.decompress().ok().map(Cow::Owned); - let domain = spec.get_domain( - previous_slot.epoch(T::slots_per_epoch()), - Domain::SyncCommittee, - &state.fork(), - state.genesis_validators_root(), - ); + // Check that the signature is over the previous block root. 
+ let previous_slot = state.slot().saturating_sub(1u64); + let previous_block_root = *state.get_block_root(previous_slot)?; - let signing_root = SigningData { - object_root: *state.get_block_root(previous_slot)?, - domain, - } - .tree_hash_root(); + let signature_set = sync_aggregate_signature_set( + decompressor, + aggregate, + state.slot(), + previous_block_root, + state, + spec, + )?; - let pubkey_refs = participant_pubkeys.iter().collect::>(); - if !aggregate - .sync_committee_signature - .eth2_fast_aggregate_verify(signing_root, &pubkey_refs) - { - return Err(SyncAggregateInvalid::SignatureInvalid.into()); + // If signature set is `None` then the signature is valid (infinity). + if signature_set.map_or(false, |signature| !signature.verify()) { + return Err(SyncAggregateInvalid::SignatureInvalid.into()); + } } // Compute participant and proposer rewards diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 3a1f6002c3b..e2a019fcc51 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -3,7 +3,7 @@ use super::signature_sets::{Error as SignatureSetError, *}; use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; -use bls::{verify_signature_sets, PublicKey, SignatureSet}; +use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ @@ -63,27 +63,36 @@ impl From> for Error { /// /// This allows for optimizations related to batch BLS operations (see the /// `Self::verify_entire_block(..)` function). -pub struct BlockSignatureVerifier<'a, T, F> +pub struct BlockSignatureVerifier<'a, T, F, D> where T: EthSpec, F: Fn(usize) -> Option> + Clone, + D: Fn(&'a PublicKeyBytes) -> Option>, { get_pubkey: F, + decompressor: D, state: &'a BeaconState, spec: &'a ChainSpec, sets: Vec>, } -impl<'a, T, F> BlockSignatureVerifier<'a, T, F> +impl<'a, T, F, D> BlockSignatureVerifier<'a, T, F, D> where T: EthSpec, F: Fn(usize) -> Option> + Clone, + D: Fn(&'a PublicKeyBytes) -> Option>, { /// Create a new verifier without any included signatures. See the `include...` functions to /// add signatures, and the `verify` - pub fn new(state: &'a BeaconState, get_pubkey: F, spec: &'a ChainSpec) -> Self { + pub fn new( + state: &'a BeaconState, + get_pubkey: F, + decompressor: D, + spec: &'a ChainSpec, + ) -> Self { Self { get_pubkey, + decompressor, state, spec, sets: vec![], @@ -100,11 +109,12 @@ where pub fn verify_entire_block( state: &'a BeaconState, get_pubkey: F, + decompressor: D, block: &'a SignedBeaconBlock, block_root: Option, spec: &'a ChainSpec, ) -> Result<()> { - let mut verifier = Self::new(state, get_pubkey, spec); + let mut verifier = Self::new(state, get_pubkey, decompressor, spec); verifier.include_all_signatures(block, block_root)?; verifier.verify() } @@ -146,12 +156,7 @@ where block_root: Option, ) -> Result<()> { self.include_block_proposal(block, block_root)?; - self.include_randao_reveal(block)?; - self.include_proposer_slashings(block)?; - self.include_attester_slashings(block)?; - self.include_attestations(block)?; - // Deposits are not included because they can legally have invalid signatures. 
- self.include_exits(block)?; + self.include_all_signatures_except_proposal(block)?; Ok(()) } @@ -168,6 +173,7 @@ where self.include_attestations(block)?; // Deposits are not included because they can legally have invalid signatures. self.include_exits(block)?; + self.include_sync_aggregate(block)?; Ok(()) } @@ -238,10 +244,10 @@ where .iter() .try_for_each(|attester_slashing| { let (set_1, set_2) = attester_slashing_signature_sets( - &self.state, + self.state, self.get_pubkey.clone(), attester_slashing, - &self.spec, + self.spec, )?; self.sets.push(set_1); @@ -274,11 +280,11 @@ where get_indexed_attestation(committee.committee, attestation)?; self.sets.push(indexed_attestation_signature_set( - &self.state, + self.state, self.get_pubkey.clone(), &attestation.signature, &indexed_attestation, - &self.spec, + self.spec, )?); vec.push(indexed_attestation); @@ -301,11 +307,28 @@ where .iter() .try_for_each(|exit| { let exit = - exit_signature_set(&self.state, self.get_pubkey.clone(), exit, &self.spec)?; + exit_signature_set(self.state, self.get_pubkey.clone(), exit, self.spec)?; self.sets.push(exit); Ok(()) }) } + + /// Include the signature of the block's sync aggregate (if it exists) for verification. + pub fn include_sync_aggregate(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + if let Some(sync_aggregate) = block.message().body().sync_aggregate() { + if let Some(signature_set) = sync_aggregate_signature_set( + &self.decompressor, + sync_aggregate, + block.slot(), + block.parent_root(), + self.state, + self.spec, + )? { + self.sets.push(signature_set); + } + } + Ok(()) + } } diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 4ebf2a644a8..2ba9ea78c10 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -144,6 +144,7 @@ pub type HeaderValidationError = BlockOperationError; pub type AttesterSlashingValidationError = BlockOperationError; pub type ProposerSlashingValidationError = BlockOperationError; pub type AttestationValidationError = BlockOperationError; +pub type SyncCommitteeMessageValidationError = BlockOperationError; pub type DepositValidationError = BlockOperationError; pub type ExitValidationError = BlockOperationError; diff --git a/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs b/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs index c52abf31198..c63cf520054 100644 --- a/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs @@ -44,7 +44,7 @@ pub fn is_valid_indexed_attestation( state, |i| get_pubkey_from_state(state, i), &indexed_attestation.signature, - &indexed_attestation, + indexed_attestation, spec )? 
.verify(), diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index d576396fb83..8ccfd0b2663 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -11,6 +11,7 @@ use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_ pub fn process_operations<'a, T: EthSpec>( state: &mut BeaconState, block_body: BeaconBlockBodyRef<'a, T>, + proposer_index: u64, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { @@ -26,7 +27,7 @@ pub fn process_operations<'a, T: EthSpec>( verify_signatures, spec, )?; - process_attestations(state, block_body, verify_signatures, spec)?; + process_attestations(state, block_body, proposer_index, verify_signatures, spec)?; process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; Ok(()) @@ -85,6 +86,7 @@ pub mod altair { pub fn process_attestations( state: &mut BeaconState, attestations: &[Attestation], + proposer_index: u64, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { @@ -92,7 +94,14 @@ pub mod altair { .iter() .enumerate() .try_for_each(|(i, attestation)| { - process_attestation(state, attestation, i, verify_signatures, spec) + process_attestation( + state, + attestation, + i, + proposer_index, + verify_signatures, + spec, + ) }) } @@ -100,6 +109,7 @@ pub mod altair { state: &mut BeaconState, attestation: &Attestation, att_index: usize, + proposer_index: u64, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { @@ -145,9 +155,7 @@ pub mod altair { .safe_mul(WEIGHT_DENOMINATOR)? 
.safe_div(PROPOSER_WEIGHT)?; let proposer_reward = proposer_reward_numerator.safe_div(proposer_reward_denominator)?; - // FIXME(altair): optimise by passing in proposer_index - let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)?; - increase_balance(state, proposer_index, proposer_reward)?; + increase_balance(state, proposer_index as usize, proposer_reward)?; Ok(()) } } @@ -169,7 +177,7 @@ pub fn process_proposer_slashings( .iter() .enumerate() .try_for_each(|(i, proposer_slashing)| { - verify_proposer_slashing(proposer_slashing, &state, verify_signatures, spec) + verify_proposer_slashing(proposer_slashing, state, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; slash_validator( @@ -194,11 +202,11 @@ pub fn process_attester_slashings( spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { for (i, attester_slashing) in attester_slashings.iter().enumerate() { - verify_attester_slashing(&state, &attester_slashing, verify_signatures, spec) + verify_attester_slashing(state, attester_slashing, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; let slashable_indices = - get_slashable_indices(&state, &attester_slashing).map_err(|e| e.into_with_index(i))?; + get_slashable_indices(state, attester_slashing).map_err(|e| e.into_with_index(i))?; for i in slashable_indices { slash_validator(state, i as usize, None, spec)?; @@ -212,6 +220,7 @@ pub fn process_attester_slashings( pub fn process_attestations<'a, T: EthSpec>( state: &mut BeaconState, block_body: BeaconBlockBodyRef<'a, T>, + proposer_index: u64, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { @@ -223,6 +232,7 @@ pub fn process_attestations<'a, T: EthSpec>( altair::process_attestations( state, block_body.attestations(), + proposer_index, verify_signatures, spec, )?; @@ -244,7 +254,7 @@ pub fn process_exits( // Verify and apply each exit in series. We iterate in series because higher-index exits may // become invalid due to the application of lower-index ones. 
for (i, exit) in voluntary_exits.iter().enumerate() { - verify_exit(&state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?; + verify_exit(state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?; initiate_validator_exit(state, exit.message.validator_index as usize, spec)?; } diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index df8513e0ed8..7de7d7d99a0 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -8,9 +8,10 @@ use std::borrow::Cow; use tree_hash::TreeHash; use types::{ AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, - DepositData, Domain, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, - ProposerSlashing, PublicKey, Signature, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockHeader, SignedRoot, SignedVoluntaryExit, SigningData, + DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, + ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, + SignedBeaconBlock, SignedBeaconBlockHeader, SignedContributionAndProof, SignedRoot, + SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -25,6 +26,9 @@ pub enum Error { /// Attempted to find the public key of a validator that does not exist. You cannot distinguish /// between an error and an invalid block in this case. ValidatorUnknown(u64), + /// Attempted to find the public key of a validator that does not exist. You cannot distinguish + /// between an error and an invalid block in this case. + ValidatorPubkeyUnknown(PublicKeyBytes), /// The `BeaconBlock` has a `proposer_index` that does not match the index we computed locally. /// /// The block is invalid. @@ -32,6 +36,8 @@ pub enum Error { /// The public keys supplied do not match the number of objects requiring keys. Block validity /// was not determined. MismatchedPublicKeyLen { pubkey_len: usize, other_len: usize }, + /// Pubkey decompression failed. The block is invalid. + PublicKeyDecompressionFailed, /// The public key bytes stored in the `BeaconState` were not valid. This is a serious internal /// error. 
BadBlsBytes { validator_index: u64 }, @@ -251,7 +257,7 @@ where let domain = spec.get_domain( indexed_attestation.data.target.epoch, Domain::BeaconAttester, - &fork, + fork, genesis_validators_root, ); @@ -396,3 +402,190 @@ where message, )) } + +pub fn signed_sync_aggregate_selection_proof_signature_set<'a, T, F>( + get_pubkey: F, + signed_contribution_and_proof: &'a SignedContributionAndProof, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &'a ChainSpec, +) -> Result> +where + T: EthSpec, + F: Fn(usize) -> Option>, +{ + let slot = signed_contribution_and_proof.message.contribution.slot; + + let domain = spec.get_domain( + slot.epoch(T::slots_per_epoch()), + Domain::SyncCommitteeSelectionProof, + fork, + genesis_validators_root, + ); + let selection_data = SyncAggregatorSelectionData { + slot, + subcommittee_index: signed_contribution_and_proof + .message + .contribution + .subcommittee_index, + }; + let message = selection_data.signing_root(domain); + let signature = &signed_contribution_and_proof.message.selection_proof; + let validator_index = signed_contribution_and_proof.message.aggregator_index; + + Ok(SignatureSet::single_pubkey( + signature, + get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))?, + message, + )) +} + +pub fn signed_sync_aggregate_signature_set<'a, T, F>( + get_pubkey: F, + signed_contribution_and_proof: &'a SignedContributionAndProof, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &'a ChainSpec, +) -> Result> +where + T: EthSpec, + F: Fn(usize) -> Option>, +{ + let epoch = signed_contribution_and_proof + .message + .contribution + .slot + .epoch(T::slots_per_epoch()); + + let domain = spec.get_domain( + epoch, + Domain::ContributionAndProof, + fork, + genesis_validators_root, + ); + let message = signed_contribution_and_proof.message.signing_root(domain); + let signature = &signed_contribution_and_proof.signature; + let validator_index = signed_contribution_and_proof.message.aggregator_index; + + Ok(SignatureSet::single_pubkey( + signature, + get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))?, + message, + )) +} + +#[allow(clippy::too_many_arguments)] +pub fn sync_committee_contribution_signature_set_from_pubkeys<'a, T, F>( + get_pubkey: F, + pubkey_bytes: &[PublicKeyBytes], + signature: &'a AggregateSignature, + epoch: Epoch, + beacon_block_root: Hash256, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &'a ChainSpec, +) -> Result> +where + T: EthSpec, + F: Fn(&PublicKeyBytes) -> Option>, +{ + let mut pubkeys = Vec::with_capacity(T::SyncSubcommitteeSize::to_usize()); + for pubkey in pubkey_bytes { + pubkeys.push(get_pubkey(pubkey).ok_or_else(|| Error::ValidatorPubkeyUnknown(*pubkey))?); + } + + let domain = spec.get_domain(epoch, Domain::SyncCommittee, fork, genesis_validators_root); + + let message = beacon_block_root.signing_root(domain); + + Ok(SignatureSet::multiple_pubkeys(signature, pubkeys, message)) +} + +pub fn sync_committee_message_set_from_pubkeys<'a, T>( + pubkey: Cow<'a, PublicKey>, + signature: &'a AggregateSignature, + epoch: Epoch, + beacon_block_root: Hash256, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &'a ChainSpec, +) -> Result> +where + T: EthSpec, +{ + let domain = spec.get_domain(epoch, Domain::SyncCommittee, fork, genesis_validators_root); + + let message = beacon_block_root.signing_root(domain); + + Ok(SignatureSet::single_pubkey(signature, pubkey, message)) +} + +/// Signature set verifier for a block's `sync_aggregate` 
(Altair and later). +/// +/// The `slot` should be the slot of the block that the sync aggregate is included in, which may be +/// different from `state.slot()`. The `block_root` should be the block root that the sync aggregate +/// signs over. It's passed in rather than extracted from the `state` because when verifying a batch +/// of blocks the `state` will not yet have had the blocks applied. +/// +/// Returns `Ok(None)` in the case where `sync_aggregate` has 0 signatures. The spec +/// uses a separate function `eth2_fast_aggregate_verify` for this, but we can equivalently +/// check the exceptional case eagerly and do a `fast_aggregate_verify` in the case where the +/// check fails (by returning `Some(signature_set)`). +pub fn sync_aggregate_signature_set<'a, T, D>( + decompressor: D, + sync_aggregate: &'a SyncAggregate, + slot: Slot, + block_root: Hash256, + state: &'a BeaconState, + spec: &ChainSpec, +) -> Result>> +where + T: EthSpec, + D: Fn(&'a PublicKeyBytes) -> Option>, +{ + // Allow the point at infinity to count as a signature for 0 validators as per + // `eth2_fast_aggregate_verify` from the spec. + if sync_aggregate.sync_committee_bits.is_zero() + && sync_aggregate.sync_committee_signature.is_infinity() + { + return Ok(None); + } + + let committee_pubkeys = &state + .get_built_sync_committee(slot.epoch(T::slots_per_epoch()), spec)? + .pubkeys; + + let participant_pubkeys = committee_pubkeys + .iter() + .zip(sync_aggregate.sync_committee_bits.iter()) + .filter_map(|(pubkey, bit)| { + if bit { + Some(decompressor(pubkey)) + } else { + None + } + }) + .collect::>>() + .ok_or(Error::PublicKeyDecompressionFailed)?; + + let previous_slot = slot.saturating_sub(1u64); + + let domain = spec.get_domain( + previous_slot.epoch(T::slots_per_epoch()), + Domain::SyncCommittee, + &state.fork(), + state.genesis_validators_root(), + ); + + let message = SigningData { + object_root: block_root, + domain, + } + .tree_hash_root(); + + Ok(Some(SignatureSet::multiple_pubkeys( + &sync_aggregate.sync_committee_signature, + participant_pubkeys, + message, + ))) +} diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 63e57bddc6a..fe1537a50de 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -337,6 +337,7 @@ fn invalid_attestation_no_committee_for_index() { let result = process_operations::process_attestations( &mut state, head_block.body(), + head_block.proposer_index(), VerifySignatures::True, &spec, ); @@ -368,6 +369,7 @@ fn invalid_attestation_wrong_justified_checkpoint() { let result = process_operations::process_attestations( &mut state, head_block.body(), + head_block.proposer_index(), VerifySignatures::True, &spec, ); @@ -400,6 +402,7 @@ fn invalid_attestation_bad_aggregation_bitfield_len() { let result = process_operations::process_attestations( &mut state, head_block.body(), + head_block.proposer_index(), VerifySignatures::True, &spec, ); @@ -425,6 +428,7 @@ fn invalid_attestation_bad_signature() { let result = process_operations::process_attestations( &mut state, head_block.body(), + head_block.proposer_index(), VerifySignatures::True, &spec, ); @@ -456,6 +460,7 @@ fn invalid_attestation_included_too_early() { let result = process_operations::process_attestations( &mut state, head_block.body(), + head_block.proposer_index(), VerifySignatures::True, &spec, ); @@ -491,6 +496,7 @@ fn 
invalid_attestation_included_too_late() { let result = process_operations::process_attestations( &mut state, head_block.body(), + head_block.proposer_index(), VerifySignatures::True, &spec, ); @@ -522,6 +528,7 @@ fn invalid_attestation_target_epoch_slot_mismatch() { let result = process_operations::process_attestations( &mut state, head_block.body(), + head_block.proposer_index(), VerifySignatures::True, &spec, ); diff --git a/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs b/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs index e4a46c98054..709d99ec1ca 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -33,9 +33,9 @@ pub fn verify_attester_slashing( Invalid::NotSlashable ); - is_valid_indexed_attestation(state, &attestation_1, verify_signatures, spec) + is_valid_indexed_attestation(state, attestation_1, verify_signatures, spec) .map_err(|e| error(Invalid::IndexedAttestation1Invalid(e)))?; - is_valid_indexed_attestation(state, &attestation_2, verify_signatures, spec) + is_valid_indexed_attestation(state, attestation_2, verify_signatures, spec) .map_err(|e| error(Invalid::IndexedAttestation2Invalid(e)))?; Ok(()) diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index 0cedc564b2e..3b43a8b41b6 100644 --- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -15,7 +15,7 @@ fn error(reason: DepositInvalid) -> BlockOperationError { /// /// Spec v0.12.1 pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> { - let (public_key, signature, msg) = deposit_pubkey_signature_message(&deposit_data, spec) + let (public_key, signature, msg) = deposit_pubkey_signature_message(deposit_data, spec) .ok_or_else(|| error(DepositInvalid::BadBlsBytes))?; verify!( diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 4c659cfff83..894da7ceedf 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -1,7 +1,6 @@ #![deny(clippy::wildcard_imports)] -// FIXME(altair): refactor to remove phase0/base structs, including `EpochProcessingSummary` -pub use base::{TotalBalances, ValidatorStatus, ValidatorStatuses}; +pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; @@ -12,22 +11,15 @@ pub use weigh_justification_and_finalization::weigh_justification_and_finalizati pub mod altair; pub mod base; pub mod effective_balance_updates; +pub mod epoch_processing_summary; pub mod errors; pub mod historical_roots_update; pub mod registry_updates; pub mod resets; pub mod slashings; pub mod tests; -pub mod validator_statuses; pub mod weigh_justification_and_finalization; -/// Provides a summary of validator participation during the epoch. -#[derive(PartialEq, Debug)] -pub struct EpochProcessingSummary { - pub total_balances: TotalBalances, - pub statuses: Vec, -} - /// Performs per-epoch processing on some BeaconState. /// /// Mutates the given `BeaconState`, returning early if an error is encountered. 
If an error is diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index 79a72118cba..dd93ccab216 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -3,10 +3,10 @@ use crate::per_epoch_processing::{ effective_balance_updates::process_effective_balance_updates, historical_roots_update::process_historical_roots_update, resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, - validator_statuses::ValidatorStatuses, }; pub use inactivity_updates::process_inactivity_updates; pub use justification_and_finalization::process_justification_and_finalization; +pub use participation_cache::ParticipationCache; pub use participation_flag_updates::process_participation_flag_updates; pub use rewards_and_penalties::process_rewards_and_penalties; pub use sync_committee_updates::process_sync_committee_updates; @@ -14,6 +14,7 @@ use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; pub mod inactivity_updates; pub mod justification_and_finalization; +pub mod participation_cache; pub mod participation_flag_updates; pub mod rewards_and_penalties; pub mod sync_committee_updates; @@ -27,13 +28,16 @@ pub fn process_epoch( state.build_committee_cache(RelativeEpoch::Current, spec)?; state.build_committee_cache(RelativeEpoch::Next, spec)?; + // Pre-compute participating indices and total balances. + let participation_cache = ParticipationCache::new(state, spec)?; + // Justification and finalization. - process_justification_and_finalization(state, spec)?; + process_justification_and_finalization(state, &participation_cache)?; - process_inactivity_updates(state, spec)?; + process_inactivity_updates(state, &participation_cache, spec)?; // Rewards and Penalties. - process_rewards_and_penalties(state, spec)?; + process_rewards_and_penalties(state, &participation_cache, spec)?; // Registry Updates. process_registry_updates(state, spec)?; @@ -41,7 +45,7 @@ pub fn process_epoch( // Slashings. process_slashings( state, - state.get_total_active_balance(spec)?, + participation_cache.current_epoch_total_active_balance(), spec.proportional_slashing_multiplier_altair, spec, )?; @@ -69,14 +73,7 @@ pub fn process_epoch( // Rotate the epoch caches to suit the epoch transition. state.advance_caches()?; - // FIXME(altair): this is an incorrect dummy value, we should think harder - // about how we want to unify validator statuses between phase0 & altair. - // We should benchmark the new state transition and work out whether Altair could - // be accelerated by some similar cache. 
- let validator_statuses = ValidatorStatuses::new(state, spec)?; - - Ok(EpochProcessingSummary { - total_balances: validator_statuses.total_balances, - statuses: validator_statuses.statuses, + Ok(EpochProcessingSummary::Altair { + participation_cache, }) } diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs index cc629c1ef09..038fe770440 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs @@ -1,3 +1,4 @@ +use super::ParticipationCache; use crate::EpochProcessingError; use core::result::Result; use core::result::Result::Ok; @@ -10,6 +11,7 @@ use types::eth_spec::EthSpec; pub fn process_inactivity_updates( state: &mut BeaconState, + participation_cache: &ParticipationCache, spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { // Score updates based on previous epoch participation, skip genesis epoch @@ -17,15 +19,12 @@ pub fn process_inactivity_updates( return Ok(()); } - let unslashed_indices = state.get_unslashed_participating_indices( - TIMELY_TARGET_FLAG_INDEX, - state.previous_epoch(), - spec, - )?; + let unslashed_indices = participation_cache + .get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, state.previous_epoch())?; - for index in state.get_eligible_validator_indices()? { + for &index in participation_cache.eligible_validator_indices() { // Increase inactivity score of inactive validators - if unslashed_indices.contains(&index) { + if unslashed_indices.contains(index)? { let inactivity_score = state.get_inactivity_score_mut(index)?; inactivity_score.safe_sub_assign(min(1, *inactivity_score))?; } else { diff --git a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs index 13e14d4d8cd..f47d9c0e688 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs @@ -1,13 +1,14 @@ +use super::ParticipationCache; use crate::per_epoch_processing::weigh_justification_and_finalization; use crate::per_epoch_processing::Error; use safe_arith::SafeArith; use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; -use types::{BeaconState, ChainSpec, EthSpec}; +use types::{BeaconState, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. pub fn process_justification_and_finalization( state: &mut BeaconState, - spec: &ChainSpec, + participation_cache: &ParticipationCache, ) -> Result<(), Error> { if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { return Ok(()); @@ -15,21 +16,13 @@ pub fn process_justification_and_finalization( let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); - let previous_indices = state.get_unslashed_participating_indices( - TIMELY_TARGET_FLAG_INDEX, - previous_epoch, - spec, - )?; - let current_indices = - state.get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, current_epoch, spec)?; - let total_active_balance = state.get_total_balance( - state - .get_active_validator_indices(current_epoch, spec)? 
- .as_slice(), - spec, - )?; - let previous_target_balance = state.get_total_balance(&previous_indices, spec)?; - let current_target_balance = state.get_total_balance(¤t_indices, spec)?; + let previous_indices = participation_cache + .get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, previous_epoch)?; + let current_indices = participation_cache + .get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, current_epoch)?; + let total_active_balance = participation_cache.current_epoch_total_active_balance(); + let previous_target_balance = previous_indices.total_balance()?; + let current_target_balance = current_indices.total_balance()?; weigh_justification_and_finalization( state, total_active_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs new file mode 100644 index 00000000000..503dadfc708 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs @@ -0,0 +1,405 @@ +//! Provides the `ParticipationCache`, a custom Lighthouse cache which attempts to reduce CPU and +//! memory usage by: +//! +//! - Caching a map of `validator_index -> participation_flags` for all active validators in the +//! previous and current epochs. +//! - Caching the total balances of: +//! - All active validators. +//! - All active validators matching each of the three "timely" flags. +//! - Caching the "eligible" validators. +//! +//! Additionally, this cache is returned from the `altair::process_epoch` function and can be used +//! to get useful summaries about the validator participation in an epoch. + +use safe_arith::{ArithError, SafeArith}; +use std::collections::HashMap; +use types::{ + consts::altair::{ + NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, + TIMELY_TARGET_FLAG_INDEX, + }, + BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch, +}; + +#[derive(Debug, PartialEq)] +pub enum Error { + InvalidFlagIndex(usize), +} + +/// A balance which will never be below the specified `minimum`. +/// +/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected. +#[derive(PartialEq, Debug, Clone, Copy)] +struct Balance { + raw: u64, + minimum: u64, +} + +impl Balance { + /// Initialize the balance to `0`, or the given `minimum`. + pub fn zero(minimum: u64) -> Self { + Self { raw: 0, minimum } + } + + /// Returns the balance with respect to the initialization `minimum`. + pub fn get(&self) -> u64 { + std::cmp::max(self.raw, self.minimum) + } + + /// Add-assign to the balance. + pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> { + self.raw.safe_add_assign(other) + } +} + +/// Caches the participation values for one epoch (either the previous or current). +#[derive(PartialEq, Debug)] +struct SingleEpochParticipationCache { + /// Maps an active validator index to their participation flags. + /// + /// To reiterate, only active and unslashed validator indices are stored in this map. + /// + /// ## Note + /// + /// It would be ideal to maintain a reference to the `BeaconState` here rather than copying the + /// `ParticipationFlags`, however that would cause us to run into mutable reference limitations + /// upstream. + unslashed_participating_indices: HashMap, + /// Stores the sum of the balances for all validators in `self.unslashed_participating_indices` + /// for all flags in `NUM_FLAG_INDICES`. 
+ /// + /// A flag balance is only incremented if a validator is in that flag set. + total_flag_balances: [Balance; NUM_FLAG_INDICES], + /// Stores the sum of all balances of all validators in `self.unslashed_participating_indices` + /// (regardless of which flags are set). + total_active_balance: Balance, +} + +impl SingleEpochParticipationCache { + fn new(hashmap_len: usize, spec: &ChainSpec) -> Self { + let zero_balance = Balance::zero(spec.effective_balance_increment); + + Self { + unslashed_participating_indices: HashMap::with_capacity(hashmap_len), + total_flag_balances: [zero_balance; NUM_FLAG_INDICES], + total_active_balance: zero_balance, + } + } + + /// Returns the total balance of attesters who have `flag_index` set. + fn total_flag_balance(&self, flag_index: usize) -> Result { + self.total_flag_balances + .get(flag_index) + .map(Balance::get) + .ok_or(Error::InvalidFlagIndex(flag_index)) + } + + /// Returns `true` if `val_index` is active, unslashed and has `flag_index` set. + /// + /// ## Errors + /// + /// May return an error if `flag_index` is out-of-bounds. + fn has_flag(&self, val_index: usize, flag_index: usize) -> Result { + if let Some(participation_flags) = self.unslashed_participating_indices.get(&val_index) { + participation_flags + .has_flag(flag_index) + .map_err(|_| Error::InvalidFlagIndex(flag_index)) + } else { + Ok(false) + } + } + + /// Process an **active** validator, reading from the `state` with respect to the + /// `relative_epoch`. + /// + /// ## Errors + /// + /// - The provided `state` **must** be Altair. An error will be returned otherwise. + /// - An error will be returned if the `val_index` validator is inactive at the given + /// `relative_epoch`. + fn process_active_validator( + &mut self, + val_index: usize, + state: &BeaconState, + relative_epoch: RelativeEpoch, + ) -> Result<(), BeaconStateError> { + let val_balance = state.get_effective_balance(val_index)?; + let validator = state.get_validator(val_index)?; + + // Sanity check to ensure the validator is active. + let epoch = relative_epoch.into_epoch(state.current_epoch()); + if !validator.is_active_at(epoch) { + return Err(BeaconStateError::ValidatorIsInactive { val_index }); + } + + let epoch_participation = match relative_epoch { + RelativeEpoch::Current => state.current_epoch_participation(), + RelativeEpoch::Previous => state.previous_epoch_participation(), + _ => Err(BeaconStateError::EpochOutOfBounds), + }? + .get(val_index) + .ok_or(BeaconStateError::ParticipationOutOfBounds(val_index))?; + + // All active validators increase the total active balance. + self.total_active_balance.safe_add_assign(val_balance)?; + + // Only unslashed validators may proceed. + if validator.slashed { + return Ok(()); + } + + // Add their `ParticipationFlags` to the map. + self.unslashed_participating_indices + .insert(val_index, *epoch_participation); + + // Iterate through all the flags and increment the total flag balances for whichever flags + // are set for `val_index`. + for (flag, balance) in self.total_flag_balances.iter_mut().enumerate() { + if epoch_participation.has_flag(flag)? { + balance.safe_add_assign(val_balance)?; + } + } + + Ok(()) + } +} + +/// Maintains a cache to be used during `altair::process_epoch`. +#[derive(PartialEq, Debug)] +pub struct ParticipationCache { + current_epoch: Epoch, + /// Caches information about active validators pertaining to `self.current_epoch`. 
+ current_epoch_participation: SingleEpochParticipationCache, + previous_epoch: Epoch, + /// Caches information about active validators pertaining to `self.previous_epoch`. + previous_epoch_participation: SingleEpochParticipationCache, + /// Caches the result of the `get_eligible_validator_indices` function. + eligible_indices: Vec, +} + +impl ParticipationCache { + /// Instantiate `Self`, returning a fully initialized cache. + /// + /// ## Errors + /// + /// - The provided `state` **must** be an Altair state. An error will be returned otherwise. + pub fn new( + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + let current_epoch = state.current_epoch(); + let previous_epoch = state.previous_epoch(); + + let num_previous_epoch_active_vals = state + .get_cached_active_validator_indices(RelativeEpoch::Previous)? + .len(); + let num_current_epoch_active_vals = state + .get_cached_active_validator_indices(RelativeEpoch::Current)? + .len(); + + // Both the current/previous epoch participations are set to a capacity that is slightly + // larger than required. The difference will be due slashed-but-active validators. + let mut current_epoch_participation = + SingleEpochParticipationCache::new(num_current_epoch_active_vals, spec); + let mut previous_epoch_participation = + SingleEpochParticipationCache::new(num_previous_epoch_active_vals, spec); + // Contains the set of validators which are either: + // + // - Active in the previous epoch. + // - Slashed, but not yet withdrawable. + // + // Using the full length of `state.validators` is almost always overkill, but it ensures no + // reallocations. + let mut eligible_indices = Vec::with_capacity(state.validators().len()); + + // Iterate through all validators, updating: + // + // 1. Validator participation for current and previous epochs. + // 2. The "eligible indices". + // + // Care is taken to ensure that the ordering of `eligible_indices` is the same as the + // `get_eligible_validator_indices` function in the spec. + for (val_index, val) in state.validators().iter().enumerate() { + if val.is_active_at(current_epoch) { + current_epoch_participation.process_active_validator( + val_index, + state, + RelativeEpoch::Current, + )?; + } + + if val.is_active_at(previous_epoch) { + previous_epoch_participation.process_active_validator( + val_index, + state, + RelativeEpoch::Previous, + )?; + } + + // Note: a validator might still be "eligible" whilst returning `false` to + // `Validator::is_active_at`. + if state.is_eligible_validator(val_index)? { + eligible_indices.push(val_index) + } + } + + Ok(Self { + current_epoch, + current_epoch_participation, + previous_epoch, + previous_epoch_participation, + eligible_indices, + }) + } + + /// Equivalent to the specification `get_eligible_validator_indices` function. + pub fn eligible_validator_indices(&self) -> &[usize] { + &self.eligible_indices + } + + /// Equivalent to the `get_unslashed_participating_indices` function in the specification. 
+ pub fn get_unslashed_participating_indices( + &self, + flag_index: usize, + epoch: Epoch, + ) -> Result { + let participation = if epoch == self.current_epoch { + &self.current_epoch_participation + } else if epoch == self.previous_epoch { + &self.previous_epoch_participation + } else { + return Err(BeaconStateError::EpochOutOfBounds); + }; + + Ok(UnslashedParticipatingIndices { + participation, + flag_index, + }) + } + + /* + * Balances + */ + + pub fn current_epoch_total_active_balance(&self) -> u64 { + self.current_epoch_participation.total_active_balance.get() + } + + pub fn current_epoch_target_attesting_balance(&self) -> Result { + self.current_epoch_participation + .total_flag_balance(TIMELY_TARGET_FLAG_INDEX) + } + + pub fn previous_epoch_total_active_balance(&self) -> u64 { + self.previous_epoch_participation.total_active_balance.get() + } + + pub fn previous_epoch_target_attesting_balance(&self) -> Result { + self.previous_epoch_participation + .total_flag_balance(TIMELY_TARGET_FLAG_INDEX) + } + + pub fn previous_epoch_source_attesting_balance(&self) -> Result { + self.previous_epoch_participation + .total_flag_balance(TIMELY_SOURCE_FLAG_INDEX) + } + + pub fn previous_epoch_head_attesting_balance(&self) -> Result { + self.previous_epoch_participation + .total_flag_balance(TIMELY_HEAD_FLAG_INDEX) + } + + /* + * Active/Unslashed + */ + + pub fn is_active_unslashed_in_previous_epoch(&self, val_index: usize) -> bool { + self.previous_epoch_participation + .unslashed_participating_indices + .contains_key(&val_index) + } + + pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> bool { + self.current_epoch_participation + .unslashed_participating_indices + .contains_key(&val_index) + } + + /* + * Flags + */ + + /// Always returns false for a slashed validator. + pub fn is_previous_epoch_timely_source_attester( + &self, + val_index: usize, + ) -> Result { + self.previous_epoch_participation + .has_flag(val_index, TIMELY_SOURCE_FLAG_INDEX) + } + + /// Always returns false for a slashed validator. + pub fn is_previous_epoch_timely_target_attester( + &self, + val_index: usize, + ) -> Result { + self.previous_epoch_participation + .has_flag(val_index, TIMELY_TARGET_FLAG_INDEX) + } + + /// Always returns false for a slashed validator. + pub fn is_previous_epoch_timely_head_attester(&self, val_index: usize) -> Result { + self.previous_epoch_participation + .has_flag(val_index, TIMELY_HEAD_FLAG_INDEX) + } + + /// Always returns false for a slashed validator. + pub fn is_current_epoch_timely_source_attester(&self, val_index: usize) -> Result { + self.current_epoch_participation + .has_flag(val_index, TIMELY_SOURCE_FLAG_INDEX) + } + + /// Always returns false for a slashed validator. + pub fn is_current_epoch_timely_target_attester(&self, val_index: usize) -> Result { + self.current_epoch_participation + .has_flag(val_index, TIMELY_TARGET_FLAG_INDEX) + } + + /// Always returns false for a slashed validator. + pub fn is_current_epoch_timely_head_attester(&self, val_index: usize) -> Result { + self.current_epoch_participation + .has_flag(val_index, TIMELY_HEAD_FLAG_INDEX) + } +} + +/// Imitates the return value of the `get_unslashed_participating_indices` in the +/// specification. +/// +/// This struct exists to help make the Lighthouse code read more like the specification. 
+pub struct UnslashedParticipatingIndices<'a> { + participation: &'a SingleEpochParticipationCache, + flag_index: usize, +} + +impl<'a> UnslashedParticipatingIndices<'a> { + /// Returns `Ok(true)` if the given `val_index` is both: + /// + /// - An active validator. + /// - Has `self.flag_index` set. + pub fn contains(&self, val_index: usize) -> Result { + self.participation.has_flag(val_index, self.flag_index) + } + + /// Returns the sum of all balances of validators which have `self.flag_index` set. + /// + /// ## Notes + /// + /// Respects the `EFFECTIVE_BALANCE_INCREMENT` minimum. + pub fn total_balance(&self) -> Result { + self.participation + .total_flag_balances + .get(self.flag_index) + .ok_or(Error::InvalidFlagIndex(self.flag_index)) + .map(Balance::get) + } +} diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index 6e1475d06d0..5906e0f8d29 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -1,3 +1,4 @@ +use super::ParticipationCache; use safe_arith::SafeArith; use types::consts::altair::{ PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, @@ -13,6 +14,7 @@ use crate::per_epoch_processing::{Delta, Error}; /// Spec v1.1.0 pub fn process_rewards_and_penalties( state: &mut BeaconState, + participation_cache: &ParticipationCache, spec: &ChainSpec, ) -> Result<(), Error> { if state.current_epoch() == T::genesis_epoch() { @@ -21,13 +23,20 @@ pub fn process_rewards_and_penalties( let mut deltas = vec![Delta::default(); state.validators().len()]; - let total_active_balance = state.get_total_active_balance(spec)?; + let total_active_balance = participation_cache.current_epoch_total_active_balance(); for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { - get_flag_index_deltas(&mut deltas, state, flag_index, total_active_balance, spec)?; + get_flag_index_deltas( + &mut deltas, + state, + flag_index, + total_active_balance, + participation_cache, + spec, + )?; } - get_inactivity_penalty_deltas(&mut deltas, state, spec)?; + get_inactivity_penalty_deltas(&mut deltas, state, participation_cache, spec)?; // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 // instead). @@ -47,23 +56,23 @@ pub fn get_flag_index_deltas( state: &BeaconState, flag_index: usize, total_active_balance: u64, + participation_cache: &ParticipationCache, spec: &ChainSpec, ) -> Result<(), Error> { let previous_epoch = state.previous_epoch(); let unslashed_participating_indices = - state.get_unslashed_participating_indices(flag_index, previous_epoch, spec)?; + participation_cache.get_unslashed_participating_indices(flag_index, previous_epoch)?; let weight = get_flag_weight(flag_index)?; - let unslashed_participating_balance = - state.get_total_balance(&unslashed_participating_indices, spec)?; + let unslashed_participating_balance = unslashed_participating_indices.total_balance()?; let unslashed_participating_increments = unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; let active_increments = total_active_balance.safe_div(spec.effective_balance_increment)?; - for index in state.get_eligible_validator_indices()? 
{ + for &index in participation_cache.eligible_validator_indices() { let base_reward = get_base_reward(state, index, total_active_balance, spec)?; let mut delta = Delta::default(); - if unslashed_participating_indices.contains(&(index as usize)) { + if unslashed_participating_indices.contains(index as usize)? { if !state.is_in_inactivity_leak(spec) { let reward_numerator = base_reward .safe_mul(weight)? @@ -94,18 +103,16 @@ pub fn get_flag_weight(flag_index: usize) -> Result { pub fn get_inactivity_penalty_deltas( deltas: &mut Vec, state: &BeaconState, + participation_cache: &ParticipationCache, spec: &ChainSpec, ) -> Result<(), Error> { let previous_epoch = state.previous_epoch(); - let matching_target_indices = state.get_unslashed_participating_indices( - TIMELY_TARGET_FLAG_INDEX, - previous_epoch, - spec, - )?; - for index in state.get_eligible_validator_indices()? { + let matching_target_indices = participation_cache + .get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, previous_epoch)?; + for &index in participation_cache.eligible_validator_indices() { let mut delta = Delta::default(); - if !matching_target_indices.contains(&index) { + if !matching_target_indices.contains(index)? { let penalty_numerator = state .get_validator(index)? .effective_balance diff --git a/consensus/state_processing/src/per_epoch_processing/altair/sync_committee_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/sync_committee_updates.rs index 1edc845cb4e..294c05d1a47 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/sync_committee_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/sync_committee_updates.rs @@ -1,5 +1,6 @@ use crate::EpochProcessingError; use safe_arith::SafeArith; +use std::sync::Arc; use types::beacon_state::BeaconState; use types::chain_spec::ChainSpec; use types::eth_spec::EthSpec; @@ -12,7 +13,7 @@ pub fn process_sync_committee_updates( if next_epoch.safe_rem(spec.epochs_per_sync_committee_period)? == 0 { *state.current_sync_committee_mut()? = state.next_sync_committee()?.clone(); - *state.next_sync_committee_mut()? = state.get_next_sync_committee(spec)?; + *state.next_sync_committee_mut()? 
= Arc::new(state.get_next_sync_committee(spec)?); } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index c28d4b17803..fd530a2ea78 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -1,7 +1,4 @@ use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; -pub use crate::per_epoch_processing::validator_statuses::{ - TotalBalances, ValidatorStatus, ValidatorStatuses, -}; use crate::per_epoch_processing::{ effective_balance_updates::process_effective_balance_updates, historical_roots_update::process_historical_roots_update, @@ -11,10 +8,12 @@ pub use justification_and_finalization::process_justification_and_finalization; pub use participation_record_updates::process_participation_record_updates; pub use rewards_and_penalties::process_rewards_and_penalties; use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; +pub use validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; pub mod justification_and_finalization; pub mod participation_record_updates; pub mod rewards_and_penalties; +pub mod validator_statuses; pub fn process_epoch( state: &mut BeaconState, @@ -29,7 +28,7 @@ pub fn process_epoch( // // E.g., attestation in the previous epoch, attested to the head, etc. let mut validator_statuses = ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(&state)?; + validator_statuses.process_attestations(state)?; // Justification and finalization. process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; @@ -69,7 +68,7 @@ pub fn process_epoch( // Rotate the epoch caches to suit the epoch transition. state.advance_caches()?; - Ok(EpochProcessingSummary { + Ok(EpochProcessingSummary::Base { total_balances: validator_statuses.total_balances, statuses: validator_statuses.statuses, }) diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index d0983a20fb8..2c1ef6178e7 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -1,8 +1,8 @@ use crate::common::{base::get_base_reward, decrease_balance, increase_balance}; -use crate::per_epoch_processing::validator_statuses::{ - TotalBalances, ValidatorStatus, ValidatorStatuses, +use crate::per_epoch_processing::{ + base::{TotalBalances, ValidatorStatus, ValidatorStatuses}, + Delta, Error, }; -use crate::per_epoch_processing::{Delta, Error}; use safe_arith::SafeArith; use std::array::IntoIter as ArrayIter; use types::{BeaconState, ChainSpec, EthSpec}; @@ -60,7 +60,7 @@ pub fn process_rewards_and_penalties( return Err(Error::ValidatorStatusesInconsistent); } - let deltas = get_attestation_deltas(state, &validator_statuses, spec)?; + let deltas = get_attestation_deltas(state, validator_statuses, spec)?; // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 // instead). @@ -88,15 +88,15 @@ pub fn get_attestation_deltas( let total_balances = &validator_statuses.total_balances; - // Filter out ineligible validators. All sub-functions of the spec do this except for - // `get_inclusion_delay_deltas`. 
It's safe to do so here because any validator that is in the - // unslashed indices of the matching source attestations is active, and therefore eligible. - for (index, validator) in validator_statuses - .statuses - .iter() - .enumerate() - .filter(|(_, validator)| is_eligible_validator(validator)) - { + for (index, validator) in validator_statuses.statuses.iter().enumerate() { + // Ignore ineligible validators. All sub-functions of the spec do this except for + // `get_inclusion_delay_deltas`. It's safe to do so here because any validator that is in + // the unslashed indices of the matching source attestations is active, and therefore + // eligible. + if !state.is_eligible_validator(index)? { + continue; + } + let base_reward = get_base_reward(state, index, total_balances.current_epoch(), spec)?; let source_delta = @@ -281,11 +281,3 @@ fn get_inactivity_penalty_delta( fn get_proposer_reward(base_reward: u64, spec: &ChainSpec) -> Result { Ok(base_reward.safe_div(spec.proposer_reward_quotient)?) } - -/// Is the validator eligible for penalties and rewards at the current epoch? -/// -/// Spec: v0.12.1 -fn is_eligible_validator(validator: &ValidatorStatus) -> bool { - validator.is_active_in_previous_epoch - || (validator.is_slashed && !validator.is_withdrawable_in_current_epoch) -} diff --git a/consensus/state_processing/src/per_epoch_processing/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs similarity index 100% rename from consensus/state_processing/src/per_epoch_processing/validator_statuses.rs rename to consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs new file mode 100644 index 00000000000..6c3fb15180a --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -0,0 +1,278 @@ +use super::{ + altair::{participation_cache::Error as ParticipationCacheError, ParticipationCache}, + base::{validator_statuses::InclusionInfo, TotalBalances, ValidatorStatus}, +}; +use crate::metrics; + +/// Provides a summary of validator participation during the epoch. +#[derive(PartialEq, Debug)] +pub enum EpochProcessingSummary { + Base { + total_balances: TotalBalances, + statuses: Vec, + }, + Altair { + participation_cache: ParticipationCache, + }, +} + +impl EpochProcessingSummary { + /// Updates some Prometheus metrics with some values in `self`. + #[cfg(feature = "metrics")] + pub fn observe_metrics(&self) -> Result<(), ParticipationCacheError> { + metrics::set_gauge( + &metrics::PARTICIPATION_PREV_EPOCH_HEAD_ATTESTING_GWEI_TOTAL, + self.previous_epoch_head_attesting_balance()? as i64, + ); + metrics::set_gauge( + &metrics::PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_TOTAL, + self.previous_epoch_target_attesting_balance()? as i64, + ); + metrics::set_gauge( + &metrics::PARTICIPATION_PREV_EPOCH_SOURCE_ATTESTING_GWEI_TOTAL, + self.previous_epoch_source_attesting_balance()? as i64, + ); + metrics::set_gauge( + &metrics::PARTICIPATION_PREV_EPOCH_ACTIVE_GWEI_TOTAL, + self.previous_epoch_total_active_balance() as i64, + ); + + Ok(()) + } + + /// Returns the sum of the effective balance of all validators in the current epoch. + pub fn current_epoch_total_active_balance(&self) -> u64 { + match self { + EpochProcessingSummary::Base { total_balances, .. 
} => total_balances.current_epoch(), + EpochProcessingSummary::Altair { + participation_cache, + } => participation_cache.current_epoch_total_active_balance(), + } + } + + /// Returns the sum of the effective balance of all validators in the current epoch who + /// included an attestation that matched the target. + pub fn current_epoch_target_attesting_balance(&self) -> Result { + match self { + EpochProcessingSummary::Base { total_balances, .. } => { + Ok(total_balances.current_epoch_target_attesters()) + } + EpochProcessingSummary::Altair { + participation_cache, + } => participation_cache.current_epoch_target_attesting_balance(), + } + } + + /// Returns the sum of the effective balance of all validators in the previous epoch. + pub fn previous_epoch_total_active_balance(&self) -> u64 { + match self { + EpochProcessingSummary::Base { total_balances, .. } => total_balances.previous_epoch(), + EpochProcessingSummary::Altair { + participation_cache, + } => participation_cache.previous_epoch_total_active_balance(), + } + } + + /// Returns `true` if `val_index` was included in the active validator indices in the current + /// epoch *and* the validator is not slashed. + /// + /// ## Notes + /// + /// Always returns `false` for an unknown `val_index`. + pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> bool { + match self { + EpochProcessingSummary::Base { statuses, .. } => statuses + .get(val_index) + .map_or(false, |s| s.is_active_in_current_epoch && !s.is_slashed), + EpochProcessingSummary::Altair { + participation_cache, + .. + } => participation_cache.is_active_unslashed_in_current_epoch(val_index), + } + } + + /// Returns `true` if `val_index` had a target-matching attestation included on chain in the + /// current epoch. + /// + /// ## Differences between Base and Altair + /// + /// - Base: active validators return `true`. + /// - Altair: only active and *unslashed* validators return `true`. + /// + /// ## Notes + /// + /// Always returns `false` for an unknown `val_index`. + pub fn is_current_epoch_target_attester( + &self, + val_index: usize, + ) -> Result { + match self { + EpochProcessingSummary::Base { statuses, .. } => Ok(statuses + .get(val_index) + .map_or(false, |s| s.is_current_epoch_target_attester)), + EpochProcessingSummary::Altair { + participation_cache, + .. + } => participation_cache.is_current_epoch_timely_target_attester(val_index), + } + } + + /// Returns the sum of the effective balance of all validators in the previous epoch who + /// included an attestation that matched the target. + pub fn previous_epoch_target_attesting_balance(&self) -> Result { + match self { + EpochProcessingSummary::Base { total_balances, .. } => { + Ok(total_balances.previous_epoch_target_attesters()) + } + EpochProcessingSummary::Altair { + participation_cache, + } => participation_cache.previous_epoch_target_attesting_balance(), + } + } + + /// Returns the sum of the effective balance of all validators in the previous epoch who + /// included an attestation that matched the head. + /// + /// ## Differences between Base and Altair + /// + /// - Base: any attestation can match the head. + /// - Altair: only "timely" attestations can match the head. + pub fn previous_epoch_head_attesting_balance(&self) -> Result { + match self { + EpochProcessingSummary::Base { total_balances, .. 
} => { + Ok(total_balances.previous_epoch_head_attesters()) + } + EpochProcessingSummary::Altair { + participation_cache, + } => participation_cache.previous_epoch_head_attesting_balance(), + } + } + + /// Returns the sum of the effective balance of all validators in the previous epoch who + /// included an attestation that matched the source. + /// + /// ## Differences between Base and Altair + /// + /// - Base: any attestation can match the source. + /// - Altair: only "timely" attestations can match the source. + pub fn previous_epoch_source_attesting_balance(&self) -> Result { + match self { + EpochProcessingSummary::Base { total_balances, .. } => { + Ok(total_balances.previous_epoch_attesters()) + } + EpochProcessingSummary::Altair { + participation_cache, + } => participation_cache.previous_epoch_source_attesting_balance(), + } + } + + /// Returns `true` if `val_index` was included in the active validator indices in the previous + /// epoch *and* the validator is not slashed. + /// + /// ## Notes + /// + /// Always returns `false` for an unknown `val_index`. + pub fn is_active_unslashed_in_previous_epoch(&self, val_index: usize) -> bool { + match self { + EpochProcessingSummary::Base { statuses, .. } => statuses + .get(val_index) + .map_or(false, |s| s.is_active_in_previous_epoch && !s.is_slashed), + EpochProcessingSummary::Altair { + participation_cache, + .. + } => participation_cache.is_active_unslashed_in_previous_epoch(val_index), + } + } + + /// Returns `true` if `val_index` had a target-matching attestation included on chain in the + /// previous epoch. + /// + /// ## Notes + /// + /// Always returns `false` for an unknown `val_index`. + pub fn is_previous_epoch_target_attester( + &self, + val_index: usize, + ) -> Result { + match self { + EpochProcessingSummary::Base { statuses, .. } => Ok(statuses + .get(val_index) + .map_or(false, |s| s.is_previous_epoch_target_attester)), + EpochProcessingSummary::Altair { + participation_cache, + .. + } => participation_cache.is_previous_epoch_timely_target_attester(val_index), + } + } + + /// Returns `true` if `val_index` had a head-matching attestation included on chain in the + /// previous epoch. + /// + /// ## Differences between Base and Altair + /// + /// - Base: any attestation can match the head. + /// - Altair: only "timely" attestations can match the head. + /// + /// ## Notes + /// + /// Always returns `false` for an unknown `val_index`. + pub fn is_previous_epoch_head_attester( + &self, + val_index: usize, + ) -> Result { + match self { + EpochProcessingSummary::Base { statuses, .. } => Ok(statuses + .get(val_index) + .map_or(false, |s| s.is_previous_epoch_head_attester)), + EpochProcessingSummary::Altair { + participation_cache, + .. + } => participation_cache.is_previous_epoch_timely_head_attester(val_index), + } + } + + /// Returns `true` if `val_index` had a source-matching attestation included on chain in the + /// previous epoch. + /// + /// ## Differences between Base and Altair + /// + /// - Base: any attestation can match the source. + /// - Altair: only "timely" attestations can match the source. + /// + /// ## Notes + /// + /// Always returns `false` for an unknown `val_index`. + pub fn is_previous_epoch_source_attester( + &self, + val_index: usize, + ) -> Result { + match self { + EpochProcessingSummary::Base { statuses, .. } => Ok(statuses + .get(val_index) + .map_or(false, |s| s.is_previous_epoch_attester)), + EpochProcessingSummary::Altair { + participation_cache, + ..
+ } => participation_cache.is_previous_epoch_timely_source_attester(val_index), + } + } + + /// Returns information about the inclusion distance for `val_index` for the previous epoch. + /// + /// ## Differences between Base and Altair + /// + /// - Base: always returns `Some` if the validator had an attestation included on-chain. + /// - Altair: always returns `None`. + /// + /// ## Notes + /// + /// Always returns `None` for an unknown `val_index`. + pub fn previous_epoch_inclusion_info(&self, val_index: usize) -> Option { + match self { + EpochProcessingSummary::Base { statuses, .. } => { + statuses.get(val_index).and_then(|s| s.inclusion_info) + } + EpochProcessingSummary::Altair { .. } => None, + } + } +} diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index 651bf41ca26..04797c56342 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,3 +1,4 @@ +use crate::per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError; use types::{BeaconStateError, InconsistentFork}; #[derive(Debug, PartialEq)] @@ -23,6 +24,7 @@ pub enum EpochProcessingError { InconsistentStateFork(InconsistentFork), InvalidJustificationBit(ssz_types::Error), InvalidFlagIndex(usize), + ParticipationCache(ParticipationCacheError), } impl From for EpochProcessingError { @@ -49,6 +51,12 @@ impl From for EpochProcessingError { } } +impl From for EpochProcessingError { + fn from(e: ParticipationCacheError) -> EpochProcessingError { + EpochProcessingError::ParticipationCache(e) + } +} + #[derive(Debug, PartialEq)] pub enum InclusionError { /// The validator did not participate in an attestation in this period. diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 34ccc9e0b0d..476279998c5 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -1,5 +1,6 @@ use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices}; use std::mem; +use std::sync::Arc; use types::{ BeaconState, BeaconStateAltair, BeaconStateError as Error, ChainSpec, EthSpec, Fork, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, VariableList, @@ -25,7 +26,7 @@ pub fn translate_participation( // Apply flags to all attesting validators. let committee = state.get_beacon_committee(data.slot, data.index)?; let attesting_indices = - get_attesting_indices::(&committee.committee, &attestation.aggregation_bits)?; + get_attesting_indices::(committee.committee, &attestation.aggregation_bits)?; let epoch_participation = state.previous_epoch_participation_mut()?; for index in attesting_indices { @@ -52,6 +53,8 @@ pub fn upgrade_to_altair( VariableList::new(vec![ParticipationFlags::default(); pre.validators.len()])?; let inactivity_scores = VariableList::new(vec![0; pre.validators.len()])?; + let temp_sync_committee = Arc::new(SyncCommittee::temporary()?); + // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`.
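Editor's note: the upgrade path above allocates the temporary sync committee once and shares it between the `current_sync_committee` and `next_sync_committee` fields. A minimal standalone sketch of that sharing pattern, using a hypothetical stand-in struct rather than the real `SyncCommittee<T>` (which holds a fixed-length vector of BLS pubkeys plus an aggregate pubkey):

```rust
use std::sync::Arc;

// Hypothetical stand-in for the real `SyncCommittee<T>`.
#[derive(Debug)]
struct SyncCommittee {
    pubkeys: Vec<[u8; 48]>,
}

fn main() {
    // Build one placeholder committee and share it behind an `Arc`, mirroring
    // how `upgrade_to_altair` assigns the same temporary committee to both
    // `current_sync_committee` and `next_sync_committee`.
    let temp = Arc::new(SyncCommittee {
        pubkeys: vec![[0u8; 48]; 512],
    });

    let current = Arc::clone(&temp); // bumps a refcount; no pubkey copy
    let next = temp;

    // Both handles point at the same allocation.
    assert!(Arc::ptr_eq(&current, &next));
    println!("shared committee with {} pubkeys", current.pubkeys.len());
}
```

The same `Arc` cloning is why the `serde` dependency gains the `rc` feature later in this diff: the state now serializes through reference-counted fields.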
// @@ -94,8 +97,8 @@ pub fn upgrade_to_altair( // Inactivity inactivity_scores, // Sync committees - current_sync_committee: SyncCommittee::temporary()?, // not read - next_sync_committee: SyncCommittee::temporary()?, // not read + current_sync_committee: temp_sync_committee.clone(), // not read + next_sync_committee: temp_sync_committee, // not read // Caches committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), @@ -109,9 +112,9 @@ pub fn upgrade_to_altair( // Fill in sync committees // Note: A duplicate committee is assigned for the current and next committee at the fork // boundary - let sync_committee = post.get_next_sync_committee(spec)?; - post.as_altair_mut()?.current_sync_committee = sync_committee.clone(); - post.as_altair_mut()?.next_sync_committee = sync_committee; + let sync_committee = Arc::new(post.get_next_sync_committee(spec)?); + *post.current_sync_committee_mut()? = sync_committee.clone(); + *post.next_sync_committee_mut()? = sync_committee; *pre_state = post; diff --git a/consensus/tree_hash/src/merkleize_padded.rs b/consensus/tree_hash/src/merkleize_padded.rs index 02c8af1b36f..f7dce399497 100644 --- a/consensus/tree_hash/src/merkleize_padded.rs +++ b/consensus/tree_hash/src/merkleize_padded.rs @@ -221,7 +221,7 @@ mod test { use crate::ZERO_HASHES_MAX_INDEX; pub fn reference_root(bytes: &[u8]) -> Hash256 { - crate::merkleize_standard(&bytes) + crate::merkleize_standard(bytes) } macro_rules! common_tests { @@ -322,7 +322,7 @@ mod test { assert_eq!( reference_root(&reference_input), - merkleize_padded(&input, min_nodes), + merkleize_padded(input, min_nodes), "input.len(): {:?}", input.len() ); diff --git a/consensus/tree_hash_derive/src/lib.rs b/consensus/tree_hash_derive/src/lib.rs index 1317e56e86e..f1a94114bbe 100644 --- a/consensus/tree_hash_derive/src/lib.rs +++ b/consensus/tree_hash_derive/src/lib.rs @@ -23,14 +23,14 @@ fn get_hashable_fields_and_their_caches( .fields .iter() .filter_map(|f| { - if should_skip_hashing(&f) { + if should_skip_hashing(f) { None } else { let ident = f .ident .as_ref() .expect("tree_hash_derive only supports named struct fields"); - let opt_cache_field = get_cache_field_for(&f); + let opt_cache_field = get_cache_field_for(f); Some((ident, f.ty.clone(), opt_cache_field)) } }) @@ -94,7 +94,7 @@ fn tree_hash_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> Toke let name = &item.ident; let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - let idents = get_hashable_fields(&struct_data); + let idents = get_hashable_fields(struct_data); let num_leaves = idents.len(); let output = quote! 
{ diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index bf79d378690..6f3d18d2802 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -22,7 +22,7 @@ merkle_proof = { path = "../merkle_proof" } rayon = "1.4.1" rand = "0.7.3" safe_arith = { path = "../safe_arith" } -serde = "1.0.116" +serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" slog = "2.5.2" eth2_ssz = "0.1.2" diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index ed7b14a341a..17384857a04 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,14 +1,17 @@ -use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, - SignedRoot, -}; -use crate::{test_utils::TestRandom, Hash256}; use safe_arith::ArithError; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::slot_data::SlotData; +use crate::{test_utils::TestRandom, Hash256, Slot}; + +use super::{ + AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, + SignedRoot, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), @@ -84,10 +87,17 @@ impl Attestation { } } +impl SlotData for Attestation { + fn get_slot(&self) -> Slot { + self.data.slot + } +} + #[cfg(test)] mod tests { - use super::*; use crate::*; + use super::*; + ssz_and_tree_hash_tests!(Attestation); } diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 07fa529e0ff..3eee735f74c 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Checkpoint, Hash256, SignedRoot, Slot}; +use crate::slot_data::SlotData; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -39,6 +40,12 @@ pub struct AttestationData { impl SignedRoot for AttestationData {} +impl SlotData for AttestationData { + fn get_slot(&self) -> Slot { + self.slot + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index dcdaf059c39..54437969ca9 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -13,16 +13,18 @@ use serde_derive::{Deserialize, Serialize}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; -use std::collections::HashSet; use std::convert::TryInto; -use std::{fmt, mem}; +use std::{fmt, mem, sync::Arc}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -pub use self::committee_cache::CommitteeCache; +pub use self::committee_cache::{ + compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, + CommitteeCache, +}; pub use clone_config::CloneConfig; pub use eth_spec::*; pub use iter::BlockRootsIter; @@ -50,6 +52,9 @@ pub enum Error { UnableToDetermineProducer, InvalidBitfield, ValidatorIsWithdrawable, + ValidatorIsInactive { + val_index: usize, + }, UnableToShuffle, ShuffleIndexOutOfBounds(usize), IsAggregatorOutOfBounds, @@ -110,6 +115,10 @@ pub enum Error { ArithError(ArithError), 
MissingBeaconBlock(SignedBeaconBlockHash), MissingBeaconState(BeaconStateHash), + SyncCommitteeNotKnown { + current_epoch: Epoch, + epoch: Epoch, + }, } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. @@ -255,9 +264,9 @@ where // Light-client sync committees #[superstruct(only(Altair))] - pub current_sync_committee: SyncCommittee, + pub current_sync_committee: Arc>, #[superstruct(only(Altair))] - pub next_sync_committee: SyncCommittee, + pub next_sync_committee: Arc>, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] @@ -480,7 +489,7 @@ impl BeaconState { ) -> Result<&[usize], Error> { let cache = self.committee_cache(relative_epoch)?; - Ok(&cache.active_validator_indices()) + Ok(cache.active_validator_indices()) } /// Returns the active validator indices for the given epoch. @@ -730,6 +739,28 @@ impl BeaconState { Ok(hash(&preimage)) } + /// Get the already-built current or next sync committee from the state. + pub fn get_built_sync_committee( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result<&Arc>, Error> { + let sync_committee_period = epoch.sync_committee_period(spec)?; + let current_sync_committee_period = self.current_epoch().sync_committee_period(spec)?; + let next_sync_committee_period = current_sync_committee_period.safe_add(1)?; + + if sync_committee_period == current_sync_committee_period { + self.current_sync_committee() + } else if sync_committee_period == next_sync_committee_period { + self.next_sync_committee() + } else { + Err(Error::SyncCommitteeNotKnown { + current_epoch: self.current_epoch(), + epoch, + }) + } + } + /// Get the validator indices of all validators from `sync_committee`. pub fn get_sync_committee_indices( &mut self, @@ -739,7 +770,7 @@ impl BeaconState { .pubkeys .iter() .map(|pubkey| { - self.get_validator_index(&pubkey)? + self.get_validator_index(pubkey)? .ok_or(Error::PubkeyCacheInconsistent) }) .collect() @@ -1282,10 +1313,22 @@ impl BeaconState { let epoch = relative_epoch.into_epoch(self.current_epoch()); let i = Self::committee_cache_index(relative_epoch); - *self.committee_cache_at_index_mut(i)? = CommitteeCache::initialized(&self, epoch, spec)?; + *self.committee_cache_at_index_mut(i)? = self.initialize_committee_cache(epoch, spec)?; Ok(()) } + /// Initializes a new committee cache for the given `epoch`, regardless of whether one already + /// exists. Returns the committee cache without attaching it to `self`. + /// + /// To build a cache and store it on `self`, use `Self::build_committee_cache`. + pub fn initialize_committee_cache( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + CommitteeCache::initialized(self, epoch, spec) + } + /// Advances the cache for this state into the next epoch. /// /// This should be used if the `slot` of this state is advanced beyond an epoch boundary. @@ -1395,7 +1438,7 @@ impl BeaconState { if let Some(mut cache) = cache { // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as // None. There's no need to keep a cache that fails. - let root = cache.recalculate_tree_hash_root(&self)?; + let root = cache.recalculate_tree_hash_root(self)?; self.tree_hash_cache_mut().restore(cache); Ok(root) } else { @@ -1452,68 +1495,40 @@ impl BeaconState { self.clone_with(CloneConfig::committee_caches_only()) } - /// Get the unslashed participating indices for a given `flag_index`. - /// - /// The `self` state must be Altair or later. 
- pub fn get_unslashed_participating_indices( - &self, - flag_index: usize, - epoch: Epoch, - spec: &ChainSpec, - ) -> Result, Error> { - let epoch_participation = if epoch == self.current_epoch() { - self.current_epoch_participation()? - } else if epoch == self.previous_epoch() { - self.previous_epoch_participation()? - } else { - return Err(Error::EpochOutOfBounds); - }; - let active_validator_indices = self.get_active_validator_indices(epoch, spec)?; - itertools::process_results( - active_validator_indices.into_iter().map(|val_index| { - let has_flag = epoch_participation - .get(val_index) - .ok_or(Error::ParticipationOutOfBounds(val_index))? - .has_flag(flag_index)?; - let not_slashed = !self.get_validator(val_index)?.slashed; - Ok((val_index, has_flag && not_slashed)) - }), - |iter| { - iter.filter(|(_, eligible)| *eligible) - .map(|(validator_index, _)| validator_index) - .collect() - }, - ) - } - - pub fn get_eligible_validator_indices(&self) -> Result, Error> { - match self { - BeaconState::Base(_) => Err(Error::IncorrectStateVariant), - BeaconState::Altair(_) => { - let previous_epoch = self.previous_epoch(); - Ok(self - .validators() - .iter() - .enumerate() - .filter_map(|(i, val)| { - if val.is_active_at(previous_epoch) - || (val.slashed - && previous_epoch + Epoch::new(1) < val.withdrawable_epoch) - { - Some(i) - } else { - None - } - }) - .collect()) - } - } + pub fn is_eligible_validator(&self, val_index: usize) -> Result { + let previous_epoch = self.previous_epoch(); + self.get_validator(val_index).map(|val| { + val.is_active_at(previous_epoch) + || (val.slashed && previous_epoch + Epoch::new(1) < val.withdrawable_epoch) + }) } pub fn is_in_inactivity_leak(&self, spec: &ChainSpec) -> bool { (self.previous_epoch() - self.finalized_checkpoint().epoch) > spec.min_epochs_to_inactivity_penalty } + + /// Get the `SyncCommittee` associated with the next slot. Useful because sync committees + /// assigned to `slot` sign for `slot - 1`. This creates the exceptional logic below when + /// transitioning between sync committee periods. 
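Editor's note: the helper defined next picks between the current and next committee by comparing sync committee periods, which reduces to integer division on epochs. A self-contained sketch of that arithmetic, assuming the mainnet `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` of 256 (the real code reads it from `ChainSpec` via `Epoch::sync_committee_period`):

```rust
// Assumed mainnet constant; in the diff this comes from
// `spec.epochs_per_sync_committee_period`.
const EPOCHS_PER_SYNC_COMMITTEE_PERIOD: u64 = 256;

fn sync_committee_period(epoch: u64) -> u64 {
    epoch / EPOCHS_PER_SYNC_COMMITTEE_PERIOD
}

/// Returns `Some(true)` if the committee for `epoch` is the state's *current*
/// committee, `Some(false)` if it is the *next* one. Anything further ahead is
/// the `SyncCommitteeNotKnown` error case in the real `get_built_sync_committee`.
fn is_current_committee(state_epoch: u64, epoch: u64) -> Option<bool> {
    let current = sync_committee_period(state_epoch);
    match sync_committee_period(epoch) {
        p if p == current => Some(true),
        p if p == current + 1 => Some(false),
        _ => None,
    }
}

fn main() {
    assert_eq!(is_current_committee(0, 255), Some(true)); // same period
    assert_eq!(is_current_committee(0, 256), Some(false)); // next period
    assert_eq!(is_current_committee(0, 512), None); // not yet computed
}
```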
+ pub fn get_sync_committee_for_next_slot( + &self, + spec: &ChainSpec, + ) -> Result>, Error> { + let next_slot_epoch = self + .slot() + .saturating_add(Slot::new(1)) + .epoch(T::slots_per_epoch()); + + let sync_committee = if self.current_epoch().sync_committee_period(spec) + == next_slot_epoch.sync_committee_period(spec) + { + self.current_sync_committee()?.clone() + } else { + self.next_sync_committee()?.clone() + }; + Ok(sync_committee) + } } impl From for Error { diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 9c8f428d83e..a4e446aee27 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -121,8 +121,12 @@ impl CommitteeCache { return None; } - let committee_index = - (slot.as_u64() % self.slots_per_epoch) * self.committees_per_slot + index; + let committee_index = compute_committee_index_in_epoch( + slot, + self.slots_per_epoch as usize, + self.committees_per_slot as usize, + index as usize, + ); let committee = self.compute_committee(committee_index as usize)?; Some(BeaconCommittee { @@ -219,7 +223,10 @@ impl CommitteeCache { /// /// Spec v0.12.1 pub fn epoch_committee_count(&self) -> usize { - self.committees_per_slot as usize * self.slots_per_epoch as usize + epoch_committee_count( + self.committees_per_slot as usize, + self.slots_per_epoch as usize, + ) } /// Returns the number of committees per slot for this cache's epoch. @@ -242,16 +249,7 @@ impl CommitteeCache { /// /// Spec v0.12.1 fn compute_committee_range(&self, index: usize) -> Option> { - let count = self.epoch_committee_count(); - if count == 0 || index >= count { - return None; - } - - let num_validators = self.shuffling.len(); - let start = (num_validators * index) / count; - let end = (num_validators * (index + 1)) / count; - - Some(start..end) + compute_committee_range_in_epoch(self.epoch_committee_count(), index, self.shuffling.len()) } /// Returns the index of some validator in `self.shuffling`. @@ -264,6 +262,44 @@ impl CommitteeCache { } } +/// Computes the position of the given `committee_index` with respect to all committees in the +/// epoch. +/// +/// The result may be used as input to the `compute_committee_range_in_epoch` +/// function. +pub fn compute_committee_index_in_epoch( + slot: Slot, + slots_per_epoch: usize, + committees_per_slot: usize, + committee_index: usize, +) -> usize { + (slot.as_usize() % slots_per_epoch) * committees_per_slot + committee_index +} + +/// Computes the range for slicing the shuffled indices to determine the members of a committee. +/// +/// The `index_in_epoch` parameter can be computed using +/// `compute_committee_index_in_epoch`. +pub fn compute_committee_range_in_epoch( + epoch_committee_count: usize, + index_in_epoch: usize, + shuffling_len: usize, +) -> Option> { + if epoch_committee_count == 0 || index_in_epoch >= epoch_committee_count { + return None; + } + + let start = (shuffling_len * index_in_epoch) / epoch_committee_count; + let end = (shuffling_len * (index_in_epoch + 1)) / epoch_committee_count; + + Some(start..end) +} + +/// Returns the total number of committees in an epoch. +pub fn epoch_committee_count(committees_per_slot: usize, slots_per_epoch: usize) -> usize { + committees_per_slot * slots_per_epoch +} + /// Returns a list of all `validators` indices where the validator is active at the given /// `epoch`.
/// diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index e4a7ccf4616..f6799e0fd04 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -67,13 +67,13 @@ fn initializes_with_the_right_epoch() { let cache = CommitteeCache::default(); assert!(!cache.is_initialized_at(state.current_epoch())); - let cache = CommitteeCache::initialized(&state, state.current_epoch(), &spec).unwrap(); + let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); assert!(cache.is_initialized_at(state.current_epoch())); - let cache = CommitteeCache::initialized(&state, state.previous_epoch(), &spec).unwrap(); + let cache = CommitteeCache::initialized(&state, state.previous_epoch(), spec).unwrap(); assert!(cache.is_initialized_at(state.previous_epoch())); - let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), &spec).unwrap(); + let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), spec).unwrap(); assert!(cache.is_initialized_at(state.next_epoch().unwrap())); } diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index ff7c503f249..79011376865 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -59,7 +59,7 @@ fn test_beacon_proposer_index() { // Get the i'th candidate proposer for the given state and slot let ith_candidate = |state: &BeaconState, slot: Slot, i: usize, spec: &ChainSpec| { let epoch = slot.epoch(T::slots_per_epoch()); - let seed = state.get_beacon_proposer_seed(slot, &spec).unwrap(); + let seed = state.get_beacon_proposer_seed(slot, spec).unwrap(); let active_validators = state.get_active_validator_indices(epoch, spec).unwrap(); active_validators[compute_shuffled_index( i, @@ -338,7 +338,7 @@ mod committees { new_head_state, cache_epoch, validator_count as usize, - &spec, + spec, ); } diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 22b6ace21e6..863970c278d 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -3,7 +3,9 @@ #![allow(clippy::indexing_slicing)] use super::Error; -use crate::{BeaconState, EthSpec, Hash256, Slot, Unsigned, Validator}; +use crate::{ + BeaconState, EthSpec, Hash256, ParticipationFlags, ParticipationList, Slot, Unsigned, Validator, +}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use rayon::prelude::*; use ssz_derive::{Decode, Encode}; @@ -139,6 +141,9 @@ pub struct BeaconTreeHashCacheInner { randao_mixes: TreeHashCache, slashings: TreeHashCache, eth1_data_votes: Eth1DataVotesTreeHashCache, + // Participation caches + previous_epoch_participation: ParticipationTreeHashCache, + current_epoch_participation: ParticipationTreeHashCache, } impl BeaconTreeHashCacheInner { @@ -163,6 +168,11 @@ impl BeaconTreeHashCacheInner { let mut slashings_arena = CacheArena::default(); let slashings = state.slashings().new_tree_hash_cache(&mut slashings_arena); + let previous_epoch_participation = + ParticipationTreeHashCache::new(state, BeaconState::previous_epoch_participation); + let current_epoch_participation = + ParticipationTreeHashCache::new(state, BeaconState::current_epoch_participation); + Self { previous_state: None, validators, @@ -176,6 +186,8 @@ impl 
BeaconTreeHashCacheInner { randao_mixes, slashings, eth1_data_votes: Eth1DataVotesTreeHashCache::new(state), + previous_epoch_participation, + current_epoch_participation, } } @@ -235,7 +247,7 @@ impl BeaconTreeHashCacheInner { hasher.write(state.eth1_data().tree_hash_root().as_bytes())?; hasher.write( self.eth1_data_votes - .recalculate_tree_hash_root(&state)? + .recalculate_tree_hash_root(state)? .as_bytes(), )?; hasher.write(state.eth1_deposit_index().tree_hash_root().as_bytes())?; @@ -264,31 +276,25 @@ impl BeaconTreeHashCacheInner { )?; // Participation - match state { - BeaconState::Base(state) => { - hasher.write( - state - .previous_epoch_attestations - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(state.current_epoch_attestations.tree_hash_root().as_bytes())?; - } - // FIXME(altair): add a cache to accelerate hashing of these fields - BeaconState::Altair(state) => { - hasher.write( - state - .previous_epoch_participation - .tree_hash_root() - .as_bytes(), - )?; - hasher.write( - state - .current_epoch_participation - .tree_hash_root() - .as_bytes(), - )?; - } + if let BeaconState::Base(state) = state { + hasher.write( + state + .previous_epoch_attestations + .tree_hash_root() + .as_bytes(), + )?; + hasher.write(state.current_epoch_attestations.tree_hash_root().as_bytes())?; + } else { + hasher.write( + self.previous_epoch_participation + .recalculate_tree_hash_root(state.previous_epoch_participation()?)? + .as_bytes(), + )?; + hasher.write( + self.current_epoch_participation + .recalculate_tree_hash_root(state.current_epoch_participation()?)? + .as_bytes(), + )?; } hasher.write(state.justification_bits().tree_hash_root().as_bytes())?; @@ -506,6 +512,60 @@ impl ParallelValidatorTreeHash { } } +#[derive(Debug, PartialEq, Clone)] +pub struct ParticipationTreeHashCache { + inner: Option, +} + +#[derive(Debug, PartialEq, Clone)] +pub struct ParticipationTreeHashCacheInner { + arena: CacheArena, + tree_hash_cache: TreeHashCache, +} + +impl ParticipationTreeHashCache { + /// Initialize a new cache for the participation list returned by `field` (if any). + fn new( + state: &BeaconState, + field: impl FnOnce( + &BeaconState, + ) -> Result< + &VariableList, + Error, + >, + ) -> Self { + let inner = field(state).map(ParticipationTreeHashCacheInner::new).ok(); + Self { inner } + } + + /// Compute the tree hash root for the given `epoch_participation`. + /// + /// This function will initialize the inner cache if necessary (e.g. when crossing the fork). 
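Editor's note: the lazy-initialization pattern described above (and implemented in the function that follows) can be sketched standalone. `Option::get_or_insert_with` builds the inner cache on first use, so pre-Altair states pay nothing and the cache appears transparently after the fork. The `LazyCache` type and its byte-sum "hash" are stand-ins, not the real arena-backed Merkle cache:

```rust
// Minimal sketch of the lazy-initialization used by
// `ParticipationTreeHashCache::recalculate_tree_hash_root`.
struct LazyCache {
    inner: Option<Vec<u8>>, // stand-in for (CacheArena, TreeHashCache)
}

impl LazyCache {
    fn root(&mut self, data: &[u8]) -> u64 {
        // Build the cache the first time through; reuse it afterwards.
        let cache = self.inner.get_or_insert_with(|| data.to_vec());
        // Placeholder "hash": sum the cached bytes. The real code runs an
        // incremental Merkle root over the participation flags.
        cache.iter().map(|&b| b as u64).sum()
    }
}

fn main() {
    let mut cache = LazyCache { inner: None };
    let flags = [1u8, 0, 1, 1];
    let first = cache.root(&flags);
    let second = cache.root(&flags); // hits the already-built cache
    assert_eq!(first, second);
}
```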
+ fn recalculate_tree_hash_root( + &mut self, + epoch_participation: &VariableList, + ) -> Result { + let cache = self + .inner + .get_or_insert_with(|| ParticipationTreeHashCacheInner::new(epoch_participation)); + ParticipationList::new(epoch_participation) + .recalculate_tree_hash_root(&mut cache.arena, &mut cache.tree_hash_cache) + .map_err(Into::into) + } +} + +impl ParticipationTreeHashCacheInner { + fn new(epoch_participation: &VariableList) -> Self { + let mut arena = CacheArena::default(); + let tree_hash_cache = + ParticipationList::new(epoch_participation).new_tree_hash_cache(&mut arena); + ParticipationTreeHashCacheInner { + arena, + tree_hash_cache, + } + } +} + #[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary for BeaconTreeHashCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { @@ -516,6 +576,7 @@ impl arbitrary::Arbitrary for BeaconTreeHashCache { #[cfg(test)] mod test { use super::*; + use crate::MainnetEthSpec; #[test] fn validator_node_count() { @@ -524,4 +585,29 @@ mod test { let _cache = v.new_tree_hash_cache(&mut arena); assert_eq!(arena.backing_len(), NODES_PER_VALIDATOR); } + + #[test] + fn participation_flags() { + type N = ::ValidatorRegistryLimit; + let len = 65; + let mut test_flag = ParticipationFlags::default(); + test_flag.add_flag(0).unwrap(); + let epoch_participation = VariableList::<_, N>::new(vec![test_flag; len]).unwrap(); + + let mut cache = ParticipationTreeHashCache { inner: None }; + + let cache_root = cache + .recalculate_tree_hash_root(&epoch_participation) + .unwrap(); + let recalc_root = cache + .recalculate_tree_hash_root(&epoch_participation) + .unwrap(); + + assert_eq!(cache_root, recalc_root, "recalculated root should match"); + assert_eq!( + cache_root, + epoch_participation.tree_hash_root(), + "cached root should match uncached" + ); + } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 4ce57f3ad74..7fbb4ea5f68 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -17,6 +17,8 @@ pub enum Domain { SelectionProof, AggregateAndProof, SyncCommittee, + ContributionAndProof, + SyncCommitteeSelectionProof, } /// Lighthouse's internal configuration struct. @@ -181,6 +183,37 @@ impl ChainSpec { } } + /// Returns the fork version for a named fork. + pub fn fork_version_for_name(&self, fork_name: ForkName) -> [u8; 4] { + match fork_name { + ForkName::Base => self.genesis_fork_version, + ForkName::Altair => self.altair_fork_version, + } + } + + /// For a given fork name, return the epoch at which it activates. + pub fn fork_epoch(&self, fork_name: ForkName) -> Option { + match fork_name { + ForkName::Base => Some(Epoch::new(0)), + ForkName::Altair => self.altair_fork_epoch, + } + } + + /// Returns a full `Fork` struct for a given epoch. + pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork { + let current_fork_name = self.fork_name_at_epoch(epoch); + let previous_fork_name = current_fork_name.previous_fork().unwrap_or(ForkName::Base); + let epoch = self + .fork_epoch(current_fork_name) + .unwrap_or_else(|| Epoch::new(0)); + + Fork { + previous_version: self.fork_version_for_name(previous_fork_name), + current_version: self.fork_version_for_name(current_fork_name), + epoch, + } + } + /// Get the domain number, unmodified by the fork. 
/// /// Spec v0.12.1 @@ -194,6 +227,8 @@ impl ChainSpec { Domain::SelectionProof => self.domain_selection_proof, Domain::AggregateAndProof => self.domain_aggregate_and_proof, Domain::SyncCommittee => self.domain_sync_committee, + Domain::ContributionAndProof => self.domain_contribution_and_proof, + Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, } } @@ -675,6 +710,18 @@ mod tests { ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); } + + // Test that `fork_name_at_epoch` and `fork_epoch` are consistent. + #[test] + fn fork_name_at_epoch_consistency() { + let spec = ChainSpec::mainnet(); + + for fork_name in ForkName::list_all() { + if let Some(fork_epoch) = spec.fork_epoch(fork_name) { + assert_eq!(spec.fork_name_at_epoch(fork_epoch), fork_name); + } + } + } } #[cfg(test)] diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index 1001d702a7c..04e8e60ee55 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -8,6 +8,8 @@ pub mod altair { pub const SYNC_REWARD_WEIGHT: u64 = 2; pub const PROPOSER_WEIGHT: u64 = 8; pub const WEIGHT_DENOMINATOR: u64 = 64; + pub const SYNC_COMMITTEE_SUBNET_COUNT: u64 = 4; + pub const TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE: u64 = 16; pub const PARTICIPATION_FLAG_WEIGHTS: [u64; NUM_FLAG_INDICES] = [ TIMELY_SOURCE_WEIGHT, diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs new file mode 100644 index 00000000000..e5371f469e0 --- /dev/null +++ b/consensus/types/src/contribution_and_proof.rs @@ -0,0 +1,61 @@ +use super::{ + ChainSpec, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, SyncCommitteeContribution, + SyncSelectionProof, +}; +use crate::test_utils::TestRandom; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// A validator's aggregate sync committee contribution and selection proof. +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[serde(bound = "T: EthSpec")] +pub struct ContributionAndProof { + /// The index of the validator that created the sync contribution. + #[serde(with = "serde_utils::quoted_u64")] + pub aggregator_index: u64, + /// The aggregate contribution. + pub contribution: SyncCommitteeContribution, + /// A proof provided by the validator that permits them to publish on the + /// `sync_committee_contribution_and_proof` gossipsub topic. + pub selection_proof: Signature, +} + +impl ContributionAndProof { + /// Produces a new `ContributionAndProof` with a `selection_proof` generated by signing + /// `SyncAggregatorSelectionData` with `secret_key`. + /// + /// If `selection_proof.is_none()` it will be computed locally.
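Editor's note: the new fork helpers in the `ChainSpec` hunk above compose as sketched below. This is a condensed, self-contained model with made-up version bytes and an assumed activation epoch; the real methods also build a full `Fork` struct pairing previous and current versions:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum ForkName {
    Base,
    Altair,
}

// Hypothetical spec values; the real ones live on `ChainSpec`.
struct Spec {
    genesis_fork_version: [u8; 4],
    altair_fork_version: [u8; 4],
    altair_fork_epoch: Option<u64>,
}

impl Spec {
    fn fork_name_at_epoch(&self, epoch: u64) -> ForkName {
        match self.altair_fork_epoch {
            Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair,
            _ => ForkName::Base,
        }
    }

    fn fork_version_for_name(&self, fork_name: ForkName) -> [u8; 4] {
        match fork_name {
            ForkName::Base => self.genesis_fork_version,
            ForkName::Altair => self.altair_fork_version,
        }
    }
}

fn main() {
    let spec = Spec {
        genesis_fork_version: [0, 0, 0, 0], // assumed values
        altair_fork_version: [1, 0, 0, 0],
        altair_fork_epoch: Some(100),
    };
    // Before activation the genesis version applies; afterwards Altair's does.
    assert_eq!(spec.fork_version_for_name(spec.fork_name_at_epoch(99)), [0, 0, 0, 0]);
    assert_eq!(spec.fork_version_for_name(spec.fork_name_at_epoch(100)), [1, 0, 0, 0]);
}
```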
+ pub fn from_aggregate( + aggregator_index: u64, + contribution: SyncCommitteeContribution, + selection_proof: Option, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Self { + let selection_proof = selection_proof + .unwrap_or_else(|| { + SyncSelectionProof::new::( + contribution.slot, + contribution.subcommittee_index, + secret_key, + fork, + genesis_validators_root, + spec, + ) + }) + .into(); + + Self { + aggregator_index, + contribution, + selection_proof, + } + } +} + +impl SignedRoot for ContributionAndProof {} diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 8db6d10706f..3f59d2b905b 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -91,6 +91,10 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + /// /// Must be set to `EpochsPerEth1VotingPeriod * SlotsPerEpoch` type SlotsPerEth1VotingPeriod: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// The size of `sync_subcommittees`. + /// + /// Must be set to `SyncCommitteeSize / SyncCommitteeSubnetCount`. + type SyncSubcommitteeSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; fn default_spec() -> ChainSpec; @@ -171,6 +175,16 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn slots_per_eth1_voting_period() -> usize { Self::SlotsPerEth1VotingPeriod::to_usize() } + + /// Returns the `SYNC_COMMITTEE_SIZE` constant for this specification. + fn sync_committee_size() -> usize { + Self::SyncCommitteeSize::to_usize() + } + + /// Returns `SYNC_COMMITTEE_SIZE / SYNC_COMMITTEE_SUBNET_COUNT` for this specification. + fn sync_subcommittee_size() -> usize { + Self::SyncSubcommitteeSize::to_usize() + } } /// Macro to inherit some type values from another EthSpec. @@ -204,6 +218,7 @@ impl EthSpec for MainnetEthSpec { type MaxDeposits = U16; type MaxVoluntaryExits = U16; type SyncCommitteeSize = U512; + type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -228,6 +243,7 @@ impl EthSpec for MinimalEthSpec { type EpochsPerHistoricalVector = U64; type EpochsPerSlashingsVector = U64; type SyncCommitteeSize = U32; + type SyncSubcommitteeSize = U8; // 32 committee size / 4 sync committee subnet count type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index b6c939709ae..4941073b67b 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -26,6 +26,26 @@ impl ForkName { } } } + + /// Return the name of the fork immediately prior to the current one. + /// + /// If `self` is `ForkName::Base` then `None` is returned, as it has no predecessor. + pub fn previous_fork(self) -> Option { + match self { + ForkName::Base => None, + ForkName::Altair => Some(ForkName::Base), + } + } + + /// Return the name of the fork immediately after the current one. + /// + /// If `self` is the last known fork and has no successor, `None` is returned.
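Editor's note: the `SyncSubcommitteeSize` associated types added above are just the committee size divided by the subnet count; a quick check of that relationship for both presets, with the sizes assumed as plain constants:

```rust
// `SYNC_COMMITTEE_SUBNET_COUNT` matches the constant added in `consts.rs`.
const SYNC_COMMITTEE_SUBNET_COUNT: u64 = 4;

fn subcommittee_size(sync_committee_size: u64) -> u64 {
    sync_committee_size / SYNC_COMMITTEE_SUBNET_COUNT
}

fn main() {
    assert_eq!(subcommittee_size(512), 128); // mainnet: U512 -> U128
    assert_eq!(subcommittee_size(32), 8);    // minimal: U32 -> U8
}
```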
+ pub fn next_fork(self) -> Option { + match self { + ForkName::Base => Some(ForkName::Altair), + ForkName::Altair => None, + } + } } impl std::str::FromStr for ForkName { @@ -45,3 +65,20 @@ pub struct InconsistentFork { pub fork_at_slot: ForkName, pub object_fork: ForkName, } + +#[cfg(test)] +mod test { + use super::*; + use itertools::Itertools; + + #[test] + fn previous_and_next_fork_consistent() { + assert_eq!(ForkName::Altair.next_fork(), None); + assert_eq!(ForkName::Base.previous_fork(), None); + + for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() { + assert_eq!(prev_fork.next_fork(), Some(fork)); + assert_eq!(fork.previous_fork(), Some(prev_fork)); + } + } +} diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 86e45699b06..af7ea9b1302 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -84,7 +84,7 @@ impl Into for GraffitiString { graffiti .get_mut(..graffiti_len) .expect("graffiti_len <= GRAFFITI_BYTES_LEN") - .copy_from_slice(&graffiti_bytes); + .copy_from_slice(graffiti_bytes); graffiti.into() } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index b30ca63d8a5..7df65cb269a 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -30,6 +30,7 @@ pub mod beacon_state; pub mod chain_spec; pub mod checkpoint; pub mod consts; +pub mod contribution_and_proof; pub mod deposit; pub mod deposit_data; pub mod deposit_message; @@ -51,6 +52,7 @@ pub mod shuffling_id; pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; +pub mod signed_contribution_and_proof; pub mod signed_voluntary_exit; pub mod signing_data; pub mod validator; @@ -60,13 +62,20 @@ pub mod voluntary_exit; pub mod slot_epoch_macros; pub mod config_and_preset; pub mod participation_flags; +pub mod participation_list; pub mod preset; pub mod slot_epoch; pub mod subnet_id; pub mod sync_aggregate; +pub mod sync_aggregator_selection_data; pub mod sync_committee; +pub mod sync_committee_contribution; +pub mod sync_committee_message; +pub mod sync_selection_proof; +pub mod sync_subnet_id; mod tree_hash_impls; +pub mod slot_data; #[cfg(feature = "sqlite")] pub mod sqlite; @@ -90,6 +99,7 @@ pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *} pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::ConfigAndPreset; +pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::deposit_message::DepositMessage; @@ -104,6 +114,7 @@ pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; pub use crate::participation_flags::ParticipationFlags; +pub use crate::participation_list::ParticipationList; pub use crate::pending_attestation::PendingAttestation; pub use crate::preset::{AltairPreset, BasePreset}; pub use crate::proposer_slashing::ProposerSlashing; @@ -115,12 +126,18 @@ pub use crate::signed_beacon_block::{ SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; +pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, 
SigningData}; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::subnet_id::SubnetId; pub use crate::sync_aggregate::SyncAggregate; +pub use crate::sync_aggregator_selection_data::SyncAggregatorSelectionData; pub use crate::sync_committee::SyncCommittee; +pub use crate::sync_committee_contribution::SyncCommitteeContribution; +pub use crate::sync_committee_message::SyncCommitteeMessage; +pub use crate::sync_selection_proof::SyncSelectionProof; +pub use crate::sync_subnet_id::SyncSubnetId; pub use crate::validator::Validator; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index c0ccb6db2c5..476e7757bb1 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -28,6 +28,10 @@ impl ParticipationFlags { let mask = 1u8.safe_shl(flag_index as u32)?; Ok(self.bits & mask == mask) } + + pub fn into_u8(self) -> u8 { + self.bits + } } /// Decode implementation that transparently behaves like the inner `u8`. diff --git a/consensus/types/src/participation_list.rs b/consensus/types/src/participation_list.rs new file mode 100644 index 00000000000..89a56cb87d8 --- /dev/null +++ b/consensus/types/src/participation_list.rs @@ -0,0 +1,55 @@ +#![allow(clippy::integer_arithmetic)] + +use crate::{Hash256, ParticipationFlags, Unsigned, VariableList}; +use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache}; +use tree_hash::{mix_in_length, BYTES_PER_CHUNK}; + +/// Wrapper type allowing the implementation of `CachedTreeHash`. +#[derive(Debug)] +pub struct ParticipationList<'a, N: Unsigned> { + pub inner: &'a VariableList, +} + +impl<'a, N: Unsigned> ParticipationList<'a, N> { + pub fn new(inner: &'a VariableList) -> Self { + Self { inner } + } +} + +impl<'a, N: Unsigned> CachedTreeHash for ParticipationList<'a, N> { + fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { + TreeHashCache::new( + arena, + int_log(N::to_usize() / BYTES_PER_CHUNK), + leaf_count(self.inner.len()), + ) + } + + fn recalculate_tree_hash_root( + &self, + arena: &mut CacheArena, + cache: &mut TreeHashCache, + ) -> Result { + Ok(mix_in_length( + &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?, + self.inner.len(), + )) + } +} + +pub fn leaf_count(len: usize) -> usize { + (len + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK +} + +pub fn leaf_iter( + values: &[ParticipationFlags], +) -> impl Iterator + ExactSizeIterator + '_ { + values.chunks(BYTES_PER_CHUNK).map(|xs| { + // Zero-pad chunks on the right. 
+ let mut chunk = [0u8; BYTES_PER_CHUNK]; + for (byte, x) in chunk.iter_mut().zip(xs) { + *byte = x.into_u8(); + } + chunk + }) +} diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index f612d7074ad..0047bd3ccd4 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -1,6 +1,6 @@ use super::{ - AggregateAndProof, Attestation, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, - SecretKey, SelectionProof, Signature, SignedRoot, + AggregateAndProof, Attestation, ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, + SelectionProof, Signature, SignedRoot, }; use crate::test_utils::TestRandom; use serde_derive::{Deserialize, Serialize}; @@ -60,41 +60,4 @@ impl SignedAggregateAndProof { signature: secret_key.sign(signing_message), } } - - /// Verifies the signature of the `AggregateAndProof` - pub fn is_valid_signature( - &self, - validator_pubkey: &PublicKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> bool { - let target_epoch = self.message.aggregate.data.slot.epoch(T::slots_per_epoch()); - let domain = spec.get_domain( - target_epoch, - Domain::AggregateAndProof, - fork, - genesis_validators_root, - ); - let message = self.message.signing_root(domain); - self.signature.verify(validator_pubkey, message) - } - - /// Verifies the signature of the `AggregateAndProof` as well the underlying selection_proof in - /// the contained `AggregateAndProof`. - pub fn is_valid( - &self, - validator_pubkey: &PublicKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> bool { - self.is_valid_signature(validator_pubkey, fork, genesis_validators_root, spec) - && self.message.is_valid_selection_proof( - validator_pubkey, - fork, - genesis_validators_root, - spec, - ) - } } diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs new file mode 100644 index 00000000000..245d33ff485 --- /dev/null +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -0,0 +1,61 @@ +use super::{ + ChainSpec, ContributionAndProof, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, + SignedRoot, SyncCommitteeContribution, SyncSelectionProof, +}; +use crate::test_utils::TestRandom; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// A validator's signed contribution and proof to publish on the `sync_committee_contribution_and_proof` +/// gossipsub topic. +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[serde(bound = "T: EthSpec")] +pub struct SignedContributionAndProof { + /// The `ContributionAndProof` that was signed. + pub message: ContributionAndProof, + /// The validator's signature of `message`. + pub signature: Signature, +} + +impl SignedContributionAndProof { + /// Produces a new `SignedContributionAndProof` with a `selection_proof` generated by signing + /// `SyncAggregatorSelectionData` with `secret_key`. + /// + /// If `selection_proof.is_none()` it will be computed locally.
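Editor's note: the participation-list hashing above packs one flag byte per validator into 32-byte Merkle leaves, zero-padding the final chunk. A standalone version of that packing (without the cached-tree-hash machinery), mirroring the 65-flag case from the new unit test later in this diff:

```rust
const BYTES_PER_CHUNK: usize = 32;

/// Number of 32-byte leaves needed for `len` one-byte flags (rounded up).
fn leaf_count(len: usize) -> usize {
    (len + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK
}

/// Pack flag bytes into zero-padded 32-byte leaves.
fn leaves(flags: &[u8]) -> Vec<[u8; BYTES_PER_CHUNK]> {
    flags
        .chunks(BYTES_PER_CHUNK)
        .map(|xs| {
            let mut chunk = [0u8; BYTES_PER_CHUNK];
            chunk[..xs.len()].copy_from_slice(xs);
            chunk
        })
        .collect()
}

fn main() {
    // 65 one-byte flags need three 32-byte leaves.
    let flags = vec![0b0000_0001u8; 65];
    assert_eq!(leaf_count(flags.len()), 3);
    let ls = leaves(&flags);
    assert_eq!(ls.len(), 3);
    assert_eq!(ls[2][0], 1); // the 65th flag lands in the third leaf
    assert_eq!(ls[2][1], 0); // right-padding stays zero
}
```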
+ pub fn from_aggregate( + aggregator_index: u64, + contribution: SyncCommitteeContribution, + selection_proof: Option, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Self { + let message = ContributionAndProof::from_aggregate( + aggregator_index, + contribution, + selection_proof, + secret_key, + fork, + genesis_validators_root, + spec, + ); + + let epoch = message.contribution.slot.epoch(T::slots_per_epoch()); + let domain = spec.get_domain( + epoch, + Domain::ContributionAndProof, + fork, + genesis_validators_root, + ); + let signing_message = message.signing_root(domain); + + SignedContributionAndProof { + message, + signature: secret_key.sign(signing_message), + } + } +} diff --git a/consensus/types/src/slot_data.rs b/consensus/types/src/slot_data.rs new file mode 100644 index 00000000000..19775913b98 --- /dev/null +++ b/consensus/types/src/slot_data.rs @@ -0,0 +1,13 @@ +use crate::Slot; + +/// A trait providing a `Slot` getter for messages that are related to a single slot. Useful in +/// making parts of attestation and sync committee processing generic. +pub trait SlotData { + fn get_slot(&self) -> Slot; +} + +impl SlotData for Slot { + fn get_slot(&self) -> Slot { + *self + } +} diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 3ed3e8f3c90..3a4060acce3 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -11,10 +11,10 @@ //! may lead to programming errors which are not detected by the compiler. use crate::test_utils::TestRandom; -use crate::SignedRoot; +use crate::{ChainSpec, SignedRoot}; use rand::RngCore; -use safe_arith::SafeArith; +use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use std::fmt; @@ -90,6 +90,13 @@ impl Epoch { } } + /// Compute the sync committee period for an epoch. + pub fn sync_committee_period(&self, spec: &ChainSpec) -> Result { + Ok(self + .safe_div(spec.epochs_per_sync_committee_period)? 
+ .as_u64()) + } + pub fn slot_iter(&self, slots_per_epoch: u64) -> SlotIter { SlotIter { current_iteration: 0, diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 9b54af1692d..8bdcdb1c3b1 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -24,7 +24,7 @@ pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); pub fn subnet_id_to_string(i: u64) -> &'static str { if i < MAX_SUBNET_ID as u64 { - &SUBNET_ID_TO_STRING + SUBNET_ID_TO_STRING .get(i as usize) .expect("index below MAX_SUBNET_ID") } else { diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index ea2298d23a8..781c67374eb 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -1,10 +1,24 @@ +use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::test_utils::TestRandom; -use crate::{AggregateSignature, BitVector, EthSpec}; +use crate::{AggregateSignature, BitVector, EthSpec, SyncCommitteeContribution}; +use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[derive(Debug, PartialEq)] +pub enum Error { + SszTypesError(ssz_types::Error), + ArithError(ArithError), +} + +impl From for Error { + fn from(e: ArithError) -> Error { + Error::ArithError(e) + } +} + #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[serde(bound = "T: EthSpec")] @@ -23,6 +37,34 @@ impl SyncAggregate { } } + /// Create a `SyncAggregate` from a slice of `SyncCommitteeContribution`s. + /// + /// Equivalent to `process_sync_committee_contributions` from the spec. + pub fn from_contributions( + contributions: &[SyncCommitteeContribution], + ) -> Result, Error> { + let mut sync_aggregate = Self::new(); + let sync_subcommittee_size = + T::sync_committee_size().safe_div(SYNC_COMMITTEE_SUBNET_COUNT as usize)?; + for contribution in contributions { + for (index, participated) in contribution.aggregation_bits.iter().enumerate() { + if participated { + let participant_index = sync_subcommittee_size + .safe_mul(contribution.subcommittee_index as usize)? + .safe_add(index)?; + sync_aggregate + .sync_committee_bits + .set(participant_index, true) + .map_err(Error::SszTypesError)?; + } + } + sync_aggregate + .sync_committee_signature + .add_assign_aggregate(&contribution.signature); + } + Ok(sync_aggregate) + } + /// Empty aggregate to be used at genesis. 
/// /// Contains an empty signature and should *not* be used as the starting point for aggregation, diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs new file mode 100644 index 00000000000..94cdee3c3e1 --- /dev/null +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -0,0 +1,25 @@ +use crate::test_utils::TestRandom; +use crate::{SignedRoot, Slot}; + +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive( + Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, +)] +pub struct SyncAggregatorSelectionData { + pub slot: Slot, + pub subcommittee_index: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SyncAggregatorSelectionData); +} + +impl SignedRoot for SyncAggregatorSelectionData {} diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 085f0bc04fe..784fb0ce14a 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -1,12 +1,30 @@ use crate::test_utils::TestRandom; use crate::typenum::Unsigned; -use crate::{EthSpec, FixedVector}; +use crate::{EthSpec, FixedVector, SyncSubnetId}; use bls::PublicKeyBytes; +use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use std::collections::HashMap; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[derive(Debug, PartialEq)] +pub enum Error { + ArithError(ArithError), + InvalidSubcommitteeRange { + start_subcommittee_index: usize, + end_subcommittee_index: usize, + subcommittee_index: usize, + }, +} + +impl From for Error { + fn from(e: ArithError) -> Error { + Error::ArithError(e) + } +} + #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[serde(bound = "T: EthSpec")] @@ -26,4 +44,44 @@ impl SyncCommittee { aggregate_pubkey: PublicKeyBytes::empty(), }) } + + /// Return the pubkeys in this `SyncCommittee` for the given `subcommittee_index`. + pub fn get_subcommittee_pubkeys( + &self, + subcommittee_index: usize, + ) -> Result, Error> { + let start_subcommittee_index = subcommittee_index.safe_mul(T::sync_subcommittee_size())?; + let end_subcommittee_index = + start_subcommittee_index.safe_add(T::sync_subcommittee_size())?; + self.pubkeys + .get(start_subcommittee_index..end_subcommittee_index) + .ok_or(Error::InvalidSubcommitteeRange { + start_subcommittee_index, + end_subcommittee_index, + subcommittee_index, + }) + .map(|s| s.to_vec()) + } + + /// For a given `pubkey`, finds all subcommittees that it is included in, and maps the + /// subcommittee index (typed as `SyncSubnetId`) to all positions this `pubkey` is associated + /// with within the subcommittee. 
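Editor's note: both `SyncAggregate::from_contributions` earlier in this hunk and the subcommittee slicing just above hinge on the same index arithmetic: a bit at `index` within subcommittee `subcommittee_index` maps to one position in the committee-wide bitfield. Sketched with mainnet-preset sizes assumed as constants:

```rust
const SYNC_COMMITTEE_SIZE: usize = 512;
const SYNC_COMMITTEE_SUBNET_COUNT: usize = 4;

/// Map (subcommittee, index-within-subcommittee) to a committee-wide position.
fn participant_index(subcommittee_index: usize, index_in_subcommittee: usize) -> usize {
    let subcommittee_size = SYNC_COMMITTEE_SIZE / SYNC_COMMITTEE_SUBNET_COUNT;
    subcommittee_size * subcommittee_index + index_in_subcommittee
}

fn main() {
    let mut sync_committee_bits = vec![false; SYNC_COMMITTEE_SIZE];
    // Member 5 of subcommittee 2 participated.
    sync_committee_bits[participant_index(2, 5)] = true;
    assert!(sync_committee_bits[261]); // 128 * 2 + 5
}
```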
+ pub fn subcommittee_positions_for_public_key( + &self, + pubkey: &PublicKeyBytes, + ) -> Result<HashMap<SyncSubnetId, Vec<usize>>, Error> { + let mut subnet_positions = HashMap::new(); + for (committee_index, validator_pubkey) in self.pubkeys.iter().enumerate() { + if pubkey == validator_pubkey { + let subcommittee_index = committee_index.safe_div(T::sync_subcommittee_size())?; + let position_in_subcommittee = + committee_index.safe_rem(T::sync_subcommittee_size())?; + subnet_positions + .entry(SyncSubnetId::new(subcommittee_index as u64)) + .or_insert_with(Vec::new) + .push(position_in_subcommittee); + } + } + Ok(subnet_positions) + } }
diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs new file mode 100644 index 00000000000..a2934090be6 --- /dev/null +++ b/consensus/types/src/sync_committee_contribution.rs @@ -0,0 +1,113 @@ +use super::{AggregateSignature, EthSpec, SignedRoot}; +use crate::slot_data::SlotData; +use crate::{test_utils::TestRandom, BitVector, Hash256, Slot, SyncCommitteeMessage}; +use safe_arith::ArithError; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive(Debug, PartialEq)] +pub enum Error { + SszTypesError(ssz_types::Error), + AlreadySigned(usize), + SubnetCountIsZero(ArithError), +} + +/// An aggregation of `SyncCommitteeMessage`s, used in creating a `SignedContributionAndProof`. +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct SyncCommitteeContribution<T: EthSpec> { + pub slot: Slot, + pub beacon_block_root: Hash256, + pub subcommittee_index: u64, + pub aggregation_bits: BitVector<T::SyncSubcommitteeSize>, + pub signature: AggregateSignature, +} + +impl<T: EthSpec> SyncCommitteeContribution<T> { + /// Create a `SyncCommitteeContribution` from: + /// + /// - `message`: A single `SyncCommitteeMessage`. + /// - `subcommittee_index`: The subcommittee this contribution pertains to out of the broader + /// sync committee. This can be determined from the `SyncSubnetId` of the gossip subnet + /// this message was seen on. + /// - `validator_sync_committee_index`: The index of the validator **within** the subcommittee. + pub fn from_message( + message: &SyncCommitteeMessage, + subcommittee_index: u64, + validator_sync_committee_index: usize, + ) -> Result<Self, Error> { + let mut bits = BitVector::new(); + bits.set(validator_sync_committee_index, true) + .map_err(Error::SszTypesError)?; + Ok(Self { + slot: message.slot, + beacon_block_root: message.beacon_block_root, + subcommittee_index, + aggregation_bits: bits, + signature: AggregateSignature::from(&message.signature), + }) + } + + /// Are the aggregation bitfields of these sync contributions disjoint? + pub fn signers_disjoint_from(&self, other: &Self) -> bool { + self.aggregation_bits + .intersection(&other.aggregation_bits) + .is_zero() + }
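A hedged sketch of the message-to-contribution flow, assuming `m1` and `m2` are `SyncCommitteeMessage`s for the same slot and block root from validators at positions 0 and 3 of subcommittee 1 (the `aggregate` method is defined just below):

```rust
// Fold two messages from the same subcommittee into one contribution.
let mut contribution =
    SyncCommitteeContribution::<MainnetEthSpec>::from_message(&m1, 1, 0)?;
let other = SyncCommitteeContribution::<MainnetEthSpec>::from_message(&m2, 1, 3)?;

// Positions 0 and 3 are distinct, so the bitfields are disjoint and
// aggregation is safe (`aggregate` debug-asserts exactly this).
assert!(contribution.signers_disjoint_from(&other));
contribution.aggregate(&other);
```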
+ /// Aggregate another `SyncCommitteeContribution` into this one. + /// + /// The aggregation bitfields must be disjoint, and the data must be the same. + pub fn aggregate(&mut self, other: &Self) { + debug_assert_eq!(self.slot, other.slot); + debug_assert_eq!(self.beacon_block_root, other.beacon_block_root); + debug_assert_eq!(self.subcommittee_index, other.subcommittee_index); + debug_assert!(self.signers_disjoint_from(other)); + + self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); + self.signature.add_assign_aggregate(&other.signature); + } +} + +impl SignedRoot for Hash256 {} + +/// This is not in the spec, but useful for determining uniqueness of sync committee contributions. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +pub struct SyncContributionData { slot: Slot, beacon_block_root: Hash256, subcommittee_index: u64, } + +impl SyncContributionData { + pub fn from_contribution<T: EthSpec>(signing_data: &SyncCommitteeContribution<T>) -> Self { + Self { + slot: signing_data.slot, + beacon_block_root: signing_data.beacon_block_root, + subcommittee_index: signing_data.subcommittee_index, + } + } +} + +impl<T: EthSpec> SlotData for SyncCommitteeContribution<T> { + fn get_slot(&self) -> Slot { + self.slot + } +} + +impl SlotData for SyncContributionData { + fn get_slot(&self) -> Slot { + self.slot + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::*; + + ssz_and_tree_hash_tests!(SyncCommitteeContribution<MainnetEthSpec>); +}
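`SyncContributionData` exists purely as a uniqueness key: two contributions target the same aggregation slot exactly when their projections compare equal. A minimal sketch, assuming `a` and `b` are `SyncCommitteeContribution<MainnetEthSpec>` values already in scope:

```rust
// Project away the signature and bitfield, keeping only
// (slot, beacon_block_root, subcommittee_index).
let a_key = SyncContributionData::from_contribution(&a);
let b_key = SyncContributionData::from_contribution(&b);
if a_key == b_key {
    // Same logical target: `a` and `b` are candidates for `aggregate`.
}
```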
diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs new file mode 100644 index 00000000000..7a2f7193fb1 --- /dev/null +++ b/consensus/types/src/sync_committee_message.rs @@ -0,0 +1,57 @@ +use crate::test_utils::TestRandom; +use crate::{ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, Slot}; + +use crate::slot_data::SlotData; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// The data upon which a `SyncCommitteeContribution` is based. +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +pub struct SyncCommitteeMessage { + pub slot: Slot, + pub beacon_block_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub validator_index: u64, + // Signature by the validator over `beacon_block_root`. + pub signature: Signature, +} + +impl SyncCommitteeMessage { + /// Equivalent to `get_sync_committee_message` from the spec. + pub fn new<E: EthSpec>( + slot: Slot, + beacon_block_root: Hash256, + validator_index: u64, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Self { + let epoch = slot.epoch(E::slots_per_epoch()); + let domain = spec.get_domain(epoch, Domain::SyncCommittee, fork, genesis_validators_root); + let message = beacon_block_root.signing_root(domain); + let signature = secret_key.sign(message); + Self { + slot, + beacon_block_root, + validator_index, + signature, + } + } +} + +impl SlotData for SyncCommitteeMessage { + fn get_slot(&self) -> Slot { + self.slot + } +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SyncCommitteeMessage); +}
diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs new file mode 100644 index 00000000000..51395c0c135 --- /dev/null +++ b/consensus/types/src/sync_selection_proof.rs @@ -0,0 +1,139 @@ +use crate::consts::altair::{ + SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, +}; +use crate::{ + ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, + SyncAggregatorSelectionData, +}; +use eth2_hashing::hash; +use safe_arith::{ArithError, SafeArith}; +use ssz::Encode; +use ssz_types::typenum::Unsigned; +use std::cmp; +use std::convert::TryInto; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(PartialEq, Debug, Clone)] +pub struct SyncSelectionProof(Signature); + +impl SyncSelectionProof { + pub fn new<T: EthSpec>( + slot: Slot, + subcommittee_index: u64, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Self { + let domain = spec.get_domain( + slot.epoch(T::slots_per_epoch()), + Domain::SyncCommitteeSelectionProof, + fork, + genesis_validators_root, + ); + let message = SyncAggregatorSelectionData { + slot, + subcommittee_index, + } + .signing_root(domain); + + Self(secret_key.sign(message)) + } + + /// Returns the "modulo" used for determining if a `SyncSelectionProof` elects an aggregator. + pub fn modulo<T: EthSpec>() -> Result<u64, ArithError> { + Ok(cmp::max( + 1, + (T::SyncCommitteeSize::to_u64()) + .safe_div(SYNC_COMMITTEE_SUBNET_COUNT)? + .safe_div(TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)?, + )) + } + + pub fn is_aggregator<T: EthSpec>(&self) -> Result<bool, ArithError> { + self.is_aggregator_from_modulo(Self::modulo::<T>()?) + }
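Under the mainnet presets used elsewhere in this diff (a 512-member sync committee, 4 sync subnets, and a target of 16 aggregators per subcommittee), the modulo works out as follows; a worked sketch:

```rust
// modulo = max(1, sync_committee_size / subnet_count / target_aggregators)
let modulo = std::cmp::max(1, 512u64 / 4 / 16);
assert_eq!(modulo, 8);
// `is_aggregator_from_modulo` then selects roughly 1-in-8 subcommittee
// members: those whose hashed selection proof satisfies hash % 8 == 0.
```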
+ + pub fn is_aggregator_from_modulo(&self, modulo: u64) -> Result<bool, ArithError> { + let signature_hash = hash(&self.0.as_ssz_bytes()); + let signature_hash_int = u64::from_le_bytes( + signature_hash + .get(0..8) + .expect("hash is 32 bytes") + .try_into() + .expect("first 8 bytes of signature should always convert to fixed array"), + ); + + signature_hash_int.safe_rem(modulo).map(|rem| rem == 0) + } + + pub fn verify<T: EthSpec>( + &self, + slot: Slot, + subcommittee_index: u64, + pubkey: &PublicKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> bool { + let domain = spec.get_domain( + slot.epoch(T::slots_per_epoch()), + Domain::SyncCommitteeSelectionProof, + fork, + genesis_validators_root, + ); + let message = SyncAggregatorSelectionData { + slot, + subcommittee_index, + } + .signing_root(domain); + + self.0.verify(pubkey, message) + } +} + +impl Into<Signature> for SyncSelectionProof { + fn into(self) -> Signature { + self.0 + } +} + +impl From<Signature> for SyncSelectionProof { + fn from(sig: Signature) -> Self { + Self(sig) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::MainnetEthSpec; + use eth2_interop_keypairs::keypair; + + #[test] + fn proof_sign_and_verify() { + let slot = Slot::new(1000); + let subcommittee_index = 12; + let key = keypair(1); + let fork = &Fork::default(); + let genesis_validators_root = Hash256::zero(); + let spec = &ChainSpec::mainnet(); + + let proof = SyncSelectionProof::new::<MainnetEthSpec>( + slot, + subcommittee_index, + &key.sk, + fork, + genesis_validators_root, + spec, + ); + assert!(proof.verify::<MainnetEthSpec>( + slot, + subcommittee_index, + &key.pk, + fork, + genesis_validators_root, + spec + )); + } }
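Putting the pieces together, a hedged sketch of how a validator client might decide whether it aggregates for a subcommittee, reusing the interop `keypair` helper and mainnet parameters from the test above:

```rust
let key = keypair(1); // interop keypair, as in the test above
let proof = SyncSelectionProof::new::<MainnetEthSpec>(
    Slot::new(1000),
    12, // subcommittee index
    &key.sk,
    &Fork::default(),
    Hash256::zero(),
    &ChainSpec::mainnet(),
);
// `modulo` can only fail on zero constants, so an error is treated
// here as "not an aggregator".
if proof.is_aggregator::<MainnetEthSpec>().unwrap_or(false) {
    // This validator should produce and sign a contribution for the subnet.
}
```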
diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs new file mode 100644 index 00000000000..fba0b2993ea --- /dev/null +++ b/consensus/types/src/sync_subnet_id.rs @@ -0,0 +1,74 @@ +//! Identifies each sync committee subnet by an integer identifier. +use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use serde_derive::{Deserialize, Serialize}; +use std::ops::{Deref, DerefMut}; + +lazy_static! { + static ref SYNC_SUBNET_ID_TO_STRING: Vec<String> = { + let mut v = Vec::with_capacity(SYNC_COMMITTEE_SUBNET_COUNT as usize); + + for i in 0..SYNC_COMMITTEE_SUBNET_COUNT { + v.push(i.to_string()); + } + v + }; +} + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct SyncSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); + +pub fn sync_subnet_id_to_string(i: u64) -> &'static str { + if i < SYNC_COMMITTEE_SUBNET_COUNT { + SYNC_SUBNET_ID_TO_STRING + .get(i as usize) + .expect("index below SYNC_COMMITTEE_SUBNET_COUNT") + } else { + "sync subnet id out of range" + } +} + +impl SyncSubnetId { + pub fn new(id: u64) -> Self { + id.into() + } +} + +impl Deref for SyncSubnetId { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for SyncSubnetId { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From<u64> for SyncSubnetId { + fn from(x: u64) -> Self { + Self(x) + } +} + +impl Into<u64> for SyncSubnetId { + fn into(self) -> u64 { + self.0 + } +} + +impl Into<u64> for &SyncSubnetId { + fn into(self) -> u64 { + self.0 + } +} + +impl AsRef<str> for SyncSubnetId { + fn as_ref(&self) -> &str { + sync_subnet_id_to_string(self.0) + } +}
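The `lazy_static` table trades a one-off allocation at startup for allocation-free `&'static str` lookups afterwards, mirroring `subnet_id_to_string` earlier in this diff. A small sketch of the resulting behaviour:

```rust
let subnet = SyncSubnetId::new(2);
assert_eq!(subnet.as_ref(), "2"); // cached decimal string
assert_eq!(*subnet, 2); // `Deref` to the raw u64
// Out-of-range ids fall back to the sentinel string rather than panicking.
assert_eq!(sync_subnet_id_to_string(99), "sync subnet id out of range");
```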
diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 5a88c166308..bafbdca5f4a 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -3,6 +3,7 @@ use rand::RngCore; use rand::SeedableRng; use rand_xorshift::XorShiftRng; use ssz_types::typenum::Unsigned; +use std::sync::Arc; mod address; mod aggregate_signature; @@ -68,6 +69,15 @@ where } } +impl<U> TestRandom for Arc<U> +where + U: TestRandom, +{ + fn random_for_test(rng: &mut impl RngCore) -> Self { + Arc::new(U::random_for_test(rng)) + } +} + impl<T, N: Unsigned> TestRandom for FixedVector<T, N> where T: TestRandom,
diff --git a/consensus/types/src/tree_hash_impls.rs b/consensus/types/src/tree_hash_impls.rs index df0ba2ed2ad..ec23927d30c 100644 --- a/consensus/types/src/tree_hash_impls.rs +++ b/consensus/types/src/tree_hash_impls.rs @@ -77,7 +77,7 @@ fn process_pubkey_bytes_field( fn process_slice_field(new_tree_hash: &[u8], leaf: &mut Hash256, force_update: bool) -> bool { if force_update || leaf.as_bytes() != new_tree_hash { - leaf.assign_from_slice(&new_tree_hash); + leaf.assign_from_slice(new_tree_hash); true } else { false
diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 7569c2f7931..ecffc22bc15 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -110,6 +110,11 @@ where self.point.is_none() } + /// Returns `true` if `self` is equal to the point at infinity. + pub fn is_infinity(&self) -> bool { + self.is_infinity + } + /// Returns a reference to the underlying BLS point. pub(crate) fn point(&self) -> Option<&AggSig> { self.point.as_ref() @@ -189,18 +194,6 @@ where } } - /// Wrapper to `fast_aggregate_verify` accepting the infinity signature when `pubkeys` is empty. - pub fn eth2_fast_aggregate_verify( - &self, - msg: Hash256, - pubkeys: &[&GenericPublicKey<Pub>], - ) -> bool { - if pubkeys.is_empty() && self.is_infinity { - return true; - } - self.fast_aggregate_verify(msg, pubkeys) - } - /// Verify that `self` represents an aggregate signature where all `pubkeys` have signed their /// corresponding message in `msgs`. /// @@ -219,6 +212,20 @@ where } +/// Allow aggregate signatures to be created from single signatures. +impl<Pub, AggPub, Sig, AggSig> From<&GenericSignature<Pub, Sig>> + for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> +where + Sig: TSignature<Pub>, + AggSig: TAggregateSignature<Pub, AggPub, Sig>, +{ + fn from(sig: &GenericSignature<Pub, Sig>) -> Self { + let mut agg = Self::infinity(); + agg.add_assign(sig); + agg + } +} + impl<Pub, AggPub, Sig, AggSig> Encode for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> where Sig: TSignature<Pub>,
diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index e94f5a9abd0..bd28abff9fb 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -132,7 +132,7 @@ impl TPublicKey for blst_core::PublicKey { expected: PUBLIC_KEY_BYTES_LEN, }); } - Self::key_validate(&bytes).map_err(Into::into) + Self::key_validate(bytes).map_err(Into::into) } } @@ -278,6 +278,6 @@ impl TSecretKey<blst_core::Signature, blst_core::PublicKey> for blst_core::Secre } fn deserialize(bytes: &[u8]) -> Result<Self, Error> { - Self::from_bytes(&bytes).map_err(Into::into) + Self::from_bytes(bytes).map_err(Into::into) } }
diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index 1004dc20034..35582df380e 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -146,7 +146,7 @@ impl TAggregateSignature<PublicKey, AggregatePublicKey, Signature> for Aggregate fn deserialize(bytes: &[u8]) -> Result<Self, Error> { let mut key = [0; SIGNATURE_BYTES_LEN]; - key[..].copy_from_slice(&bytes); + key[..].copy_from_slice(bytes); Ok(Self(key)) }
diff --git a/crypto/bls/src/impls/milagro.rs b/crypto/bls/src/impls/milagro.rs index 7eaa9ad105e..eb4767d3c70 100644 --- a/crypto/bls/src/impls/milagro.rs +++ b/crypto/bls/src/impls/milagro.rs @@ -82,7 +82,7 @@ impl TPublicKey for milagro::PublicKey { } fn deserialize(bytes: &[u8]) -> Result<Self, Error> { - Self::from_bytes(&bytes).map_err(Into::into) + Self::from_bytes(bytes).map_err(Into::into) } } @@ -189,6 +189,6 @@ impl TSecretKey<milagro::Signature, milagro::PublicKey> for milagro::SecretKey { } fn deserialize(bytes: &[u8]) -> Result<Self, Error> { - Self::from_bytes(&bytes).map_err(Into::into) + Self::from_bytes(bytes).map_err(Into::into) } }
diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index 3289f0ab229..ad498dbfa87 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -34,6 +34,7 @@ macro_rules! test_suite { AggregateSignature::deserialize(&INFINITY_SIGNATURE).unwrap(), AggregateSignature::infinity(), ); + assert!(AggregateSignature::infinity().is_infinity()); } #[test] @@ -297,6 +298,17 @@ macro_rules! test_suite { .assert_single_message_verify(true) } + /// Adding two infinity signatures should yield the infinity signature. + #[test] + fn add_two_infinity_signatures() { + let tester = AggregateSignatureTester::new_with_single_msg(1) + .infinity_sig() + .aggregate_infinity_sig(); + assert!(tester.sig.is_infinity()); + assert_eq!(tester.sig, AggregateSignature::infinity()); + tester.assert_single_message_verify(false) + } +
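The new `From<&GenericSignature>` impl starts from the infinity point and folds in the single signature, which is how `SyncCommitteeContribution::from_message` seeds its aggregate. A hedged sketch of the intended usage, assuming the concrete `SecretKey`/`AggregateSignature` aliases exported by this crate:

```rust
let sk = SecretKey::random();
let message = Hash256::zero();
let single = sk.sign(message);
// Seed an aggregate from one signature; equivalent to `infinity()`
// followed by `add_assign(&single)`.
let agg = AggregateSignature::from(&single);
assert!(agg.fast_aggregate_verify(message, &[&sk.public_key()]));
```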
/// The wrong signature should not verify. #[test] fn fast_aggregate_verify_wrong_signature() {
diff --git a/crypto/eth2_key_derivation/src/derived_key.rs b/crypto/eth2_key_derivation/src/derived_key.rs index a783a511df3..1598619dfb9 100644 --- a/crypto/eth2_key_derivation/src/derived_key.rs +++ b/crypto/eth2_key_derivation/src/derived_key.rs @@ -127,7 +127,7 @@ fn mod_r(bytes: &[u8]) -> ZeroizeHash { debug_assert!(x_slice.len() <= HASH_SIZE); let mut output = ZeroizeHash::zero(); - output.as_mut_bytes()[HASH_SIZE - x_slice.len()..].copy_from_slice(&x_slice); + output.as_mut_bytes()[HASH_SIZE - x_slice.len()..].copy_from_slice(x_slice); output }
diff --git a/crypto/eth2_keystore/src/keystore.rs b/crypto/eth2_keystore/src/keystore.rs index c32277fac5f..b5846b22847 100644 --- a/crypto/eth2_keystore/src/keystore.rs +++ b/crypto/eth2_keystore/src/keystore.rs @@ -377,7 +377,7 @@ pub fn encrypt( password.retain(|c| !is_control_character(c)); - let derived_key = derive_key(&password.as_ref(), &kdf)?; + let derived_key = derive_key(password.as_ref(), kdf)?; // Encrypt secret. let mut cipher_text = plain_text.to_vec(); @@ -389,7 +389,7 @@ pub fn encrypt( // AES Encrypt let key = GenericArray::from_slice(&derived_key.as_bytes()[0..16]); let nonce = GenericArray::from_slice(params.iv.as_bytes()); - let mut cipher = AesCtr::new(&key, &nonce); + let mut cipher = AesCtr::new(key, nonce); cipher.apply_keystream(&mut cipher_text); } }; @@ -435,7 +435,7 @@ pub fn decrypt(password: &[u8], crypto: &Crypto) -> Result<PlainText, Error> { // AES Decrypt let key = GenericArray::from_slice(&derived_key.as_bytes()[0..16]); let nonce = GenericArray::from_slice(params.iv.as_bytes()); - let mut cipher = AesCtr::new(&key, &nonce); + let mut cipher = AesCtr::new(key, nonce); cipher.apply_keystream(plain_text.as_mut_bytes()); } };
diff --git a/crypto/eth2_keystore/tests/json.rs b/crypto/eth2_keystore/tests/json.rs index 93e89156cc6..a6cf5e3812b 100644 --- a/crypto/eth2_keystore/tests/json.rs +++ b/crypto/eth2_keystore/tests/json.rs @@ -41,7 +41,7 @@ fn scrypt_reference() { } "#; - assert!(Keystore::from_json_str(&vector).is_ok()); + assert!(Keystore::from_json_str(vector).is_ok()); } #[test] @@ -79,7 +79,7 @@ fn pbkdf2_reference() { } "#; - assert!(Keystore::from_json_str(&vector).is_ok()); + assert!(Keystore::from_json_str(vector).is_ok()); } #[test] @@ -119,7 +119,7 @@ fn additional_top_level_key() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -162,7 +162,7 @@ fn additional_cipher_key() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -205,7 +205,7 @@ fn additional_checksum_key() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -248,7 +248,7 @@ fn additional_kdf_key() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -291,7 +291,7 @@ fn additional_crypto_key() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -333,7 +333,7 @@ fn bad_version() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) {
Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -377,7 +377,7 @@ fn json_bad_checksum() { "#; assert_eq!( - Keystore::from_json_str(&vector) + Keystore::from_json_str(vector) .unwrap() .decrypt_keypair("testpassword".as_bytes()) .err() @@ -422,7 +422,7 @@ fn kdf_function() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -463,7 +463,7 @@ fn missing_scrypt_param() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -506,7 +506,7 @@ fn additional_scrypt_param() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -548,7 +548,7 @@ fn checksum_function() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -592,7 +592,7 @@ fn checksum_params() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -634,7 +634,7 @@ fn kdf_message() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -676,7 +676,7 @@ fn cipher_function() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -719,7 +719,7 @@ fn additional_cipher_param() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -759,7 +759,7 @@ fn missing_cipher_param() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -800,7 +800,7 @@ fn missing_pubkey() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -841,7 +841,7 @@ fn missing_path() { } "#; - assert!(Keystore::from_json_str(&vector).is_ok()); + assert!(Keystore::from_json_str(vector).is_ok()); } #[test] @@ -879,7 +879,7 @@ fn missing_version() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -920,7 +920,7 @@ fn pbkdf2_bad_hmac() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -962,7 +962,7 @@ fn pbkdf2_additional_parameter() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -1002,7 +1002,7 @@ fn pbkdf2_missing_parameter() { } "#; - match Keystore::from_json_str(&vector) { + match Keystore::from_json_str(vector) { Err(Error::InvalidJson(_)) => {} _ => panic!("expected invalid json error"), } @@ -1045,5 +1045,5 @@ fn name_field() { } "#; - assert!(Keystore::from_json_str(&vector).is_ok()); 
+ assert!(Keystore::from_json_str(vector).is_ok()); }
diff --git a/crypto/eth2_keystore/tests/params.rs b/crypto/eth2_keystore/tests/params.rs index 71e37a1ed55..bfe5f5eb81a 100644 --- a/crypto/eth2_keystore/tests/params.rs +++ b/crypto/eth2_keystore/tests/params.rs @@ -5,7 +5,7 @@ use eth2_keystore::{Error, Keystore}; const PASSWORD: &str = "testpassword"; fn decrypt_error(vector: &str) -> Error { - Keystore::from_json_str(&vector) + Keystore::from_json_str(vector) .unwrap() .decrypt_keypair(PASSWORD.as_bytes()) .err()
diff --git a/crypto/eth2_wallet/src/wallet.rs b/crypto/eth2_wallet/src/wallet.rs index a03e63557f3..7a7d65f654c 100644 --- a/crypto/eth2_wallet/src/wallet.rs +++ b/crypto/eth2_wallet/src/wallet.rs @@ -138,7 +138,7 @@ impl Wallet { name: String, nextaccount: u32, ) -> Result<Self, Error> { - let (cipher_text, checksum) = encrypt(&seed, &password, &kdf, &cipher)?; + let (cipher_text, checksum) = encrypt(seed, password, &kdf, &cipher)?; Ok(Self { json: JsonWallet { @@ -192,7 +192,7 @@ impl Wallet { // incrementing `nextaccount`. let derive = |key_type: KeyType, password: &[u8]| -> Result<Keypair, Error> { let (secret, path) = - recover_validator_secret(&self, wallet_password, self.json.nextaccount, key_type)?; + recover_validator_secret(self, wallet_password, self.json.nextaccount, key_type)?; let keypair = keypair_from_secret(secret.as_bytes())?;
diff --git a/crypto/eth2_wallet/tests/eip2386_vectors.rs b/crypto/eth2_wallet/tests/eip2386_vectors.rs index db98b2e9b16..bb632b46115 100644 --- a/crypto/eth2_wallet/tests/eip2386_vectors.rs +++ b/crypto/eth2_wallet/tests/eip2386_vectors.rs @@ -48,7 +48,7 @@ fn eip2386_test_vector_scrypt() { } "#; - let wallet = decode_and_check_seed(&vector); + let wallet = decode_and_check_seed(vector); assert_eq!( *wallet.uuid(), Uuid::parse_str("b74559b8-ed56-4841-b25c-dba1b7c9d9d5").unwrap(),
diff --git a/crypto/eth2_wallet/tests/json.rs b/crypto/eth2_wallet/tests/json.rs index 464f54cd757..3c1c8a72316 100644 --- a/crypto/eth2_wallet/tests/json.rs +++ b/crypto/eth2_wallet/tests/json.rs @@ -1,7 +1,7 @@ use eth2_wallet::{Error, KeystoreError, Wallet}; fn assert_bad_json(json: &str) { - match Wallet::from_json_str(&json) { + match Wallet::from_json_str(json) { Err(Error::KeystoreError(KeystoreError::InvalidJson(_))) => {} _ => panic!("expected invalid json error"), } @@ -48,7 +48,7 @@ fn additional_top_level_param() { } "#; - assert_bad_json(&vector); + assert_bad_json(vector); } #[test] @@ -86,7 +86,7 @@ fn missing_top_level_param() { } "#; - assert_bad_json(&vector); + assert_bad_json(vector); } #[test] @@ -125,7 +125,7 @@ fn bad_version() { } "#; - assert_bad_json(&vector); + assert_bad_json(vector); } #[test] @@ -164,7 +164,7 @@ fn bad_uuid() { } "#; - assert_bad_json(&vector); + assert_bad_json(vector); } #[test] @@ -203,7 +203,7 @@ fn bad_type() { } "#; - assert_bad_json(&vector); + assert_bad_json(vector); } #[test] @@ -242,5 +242,5 @@ fn more_that_u32_nextaccount() { } "#; - assert_bad_json(&vector); + assert_bad_json(vector); }
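For orientation, the round trip these fixtures exercise, in the form the tests above use it (assuming `json` holds a valid EIP-2335 keystore string and `password` its passphrase):

```rust
// Parse the keystore JSON, then decrypt the BLS keypair with the passphrase.
let keypair = Keystore::from_json_str(json)
    .unwrap()
    .decrypt_keypair(password.as_bytes())
    .unwrap();
```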
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index c12deda9722..1d311ec8cd9 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -27,7 +27,7 @@ dirs = "3.0.1" genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = "../common/deposit_contract" } tree_hash = "0.1.1" -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } clap_utils = { path = "../common/clap_utils" } eth2_libp2p = { path = "../beacon_node/eth2_libp2p" } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] }
diff --git a/lcli/Dockerfile b/lcli/Dockerfile new file mode 100644 index 00000000000..47ce737c9f6 --- /dev/null +++ b/lcli/Dockerfile @@ -0,0 +1,13 @@ +# `lcli` requires the full project to be in scope, so this should be built either: +# - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .` +# - from the current directory with the command: `docker build -f ./Dockerfile ../` +FROM rust:1.53.0 AS builder +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake +COPY . lighthouse +ARG PORTABLE +ENV PORTABLE $PORTABLE +RUN cd lighthouse && make install-lcli + +FROM debian:buster-slim +RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/* +COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli
diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index e2eace26854..966081a4b62 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -47,7 +47,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let mut enr_file = File::create(output_dir.join(ENR_FILENAME)) .map_err(|e| format!("Unable to create {}: {:?}", ENR_FILENAME, e))?; enr_file - .write_all(&enr.to_base64().as_bytes()) + .write_all(enr.to_base64().as_bytes()) .map_err(|e| format!("Unable to write ENR to {}: {:?}", ENR_FILENAME, e))?; let secret_bytes = match local_keypair {
diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 48ca0338dd2..adc0e041026 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -344,6 +344,13 @@ fn main() { non-default.", ), ) + .arg( + Arg::with_name("seconds-per-slot") + .long("seconds-per-slot") + .value_name("SECONDS") + .takes_value(true) + .help("Eth2 slot time"), + ) .arg( Arg::with_name("seconds-per-eth1-block") .long("seconds-per-eth1-block")
diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 777633ca821..e37145bf0d9 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -43,6 +43,7 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul maybe_update!("genesis-delay", genesis_delay); maybe_update!("eth1-id", deposit_chain_id); maybe_update!("eth1-id", deposit_network_id); + maybe_update!("seconds-per-slot", seconds_per_slot); maybe_update!("seconds-per-eth1-block", seconds_per_eth1_block); if let Some(v) = parse_ssz_optional(matches, "genesis-fork-version")?
{
diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 676eb6294ac..34676616d52 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -30,7 +30,7 @@ pub fn run_parse_ssz(matches: &ArgMatches) -> Result<(), String> { } fn decode_and_print<T: Decode + Serialize>(bytes: &[u8]) -> Result<(), String> { - let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("SSZ decode failed: {:?}", e))?; + let item = T::from_ssz_bytes(bytes).map_err(|e| format!("SSZ decode failed: {:?}", e))?; println!( "{}",
diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 925ce855c44..0598998a470 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -19,7 +19,7 @@ spec-minimal = [] [dependencies] beacon_node = { "path" = "../beacon_node" } -tokio = "1.1.0" +tokio = "1.7.1" slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = "1.0.1" types = { "path" = "../consensus/types" }
diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 6e0556a6d2d..31e153055a8 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2018" [dependencies] -tokio = { version = "1.1.0", features = ["macros", "rt", "rt-multi-thread" ] } +tokio = { version = "1.7.1", features = ["macros", "rt", "rt-multi-thread" ] } slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = "1.0.1" types = { "path" = "../../consensus/types" }
diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 80512cf19b6..547e8b8c863 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -364,7 +364,7 @@ fn run( let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); - let config = validator_client::Config::from_cli(&matches, context.log()) + let config = validator_client::Config::from_cli(matches, context.log()) .map_err(|e| format!("Unable to initialize validator config: {}", e))?; let shutdown_flag = matches.is_present("immediate-shutdown"); if let Some(dump_path) = clap_utils::parse_optional::<PathBuf>(matches, "dump-config")?
diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index a6a644fd4d6..0fde9b901d9 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -284,6 +284,24 @@ fn http_flag() { .with_config(|config| assert!(config.http_api.enabled)); } #[test] +fn http_address_flag() { + let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); + CommandLineTest::new() + .flag("http-address", Some("127.0.0.99")) + .flag("unencrypted-http-transport", None) + .run() + .with_config(|config| assert_eq!(config.http_api.listen_addr, addr)); +} +#[test] +#[should_panic] +fn missing_unencrypted_http_transport_flag() { + let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); + CommandLineTest::new() + .flag("http-address", Some("127.0.0.99")) + .run() + .with_config(|config| assert_eq!(config.http_api.listen_addr, addr)); +} +#[test] fn http_port_flag() { CommandLineTest::new() .flag("http-port", Some("9090")) @@ -360,3 +378,16 @@ pub fn malloc_tuning_flag() { // effects of it.
.run(); } +#[test] +fn doppelganger_protection_flag() { + CommandLineTest::new() + .flag("enable-doppelganger-protection", None) + .run() + .with_config(|config| assert!(config.enable_doppelganger_protection)); +} +#[test] +fn no_doppelganger_protection_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.enable_doppelganger_protection)); +} diff --git a/remote_signer/backend/src/zeroize_string.rs b/remote_signer/backend/src/zeroize_string.rs index 0d4630f3fa6..bb76e1e87e7 100644 --- a/remote_signer/backend/src/zeroize_string.rs +++ b/remote_signer/backend/src/zeroize_string.rs @@ -73,7 +73,7 @@ mod object { #[test] fn v_u8_zeroized() { // Create from `hex_string_to_bytes`, and record the pointer to its buffer. - let mut decoded_bytes = hex_string_to_bytes(&SECRET_KEY_1.to_string()).unwrap(); + let mut decoded_bytes = hex_string_to_bytes(SECRET_KEY_1).unwrap(); let old_pointer = decoded_bytes.as_ptr() as usize; // Do something with the borrowed vector, and zeroize. @@ -185,17 +185,17 @@ mod functions { ); assert_eq!( - hex_string_to_bytes(&SECRET_KEY_1).unwrap(), + hex_string_to_bytes(SECRET_KEY_1).unwrap(), SECRET_KEY_1_BYTES ); assert_eq!( - hex_string_to_bytes(&PUBLIC_KEY_1).unwrap(), + hex_string_to_bytes(PUBLIC_KEY_1).unwrap(), PUBLIC_KEY_1_BYTES.to_vec() ); assert_eq!( - hex_string_to_bytes(&SIGNING_ROOT).unwrap(), + hex_string_to_bytes(SIGNING_ROOT).unwrap(), SIGNING_ROOT_BYTES.to_vec() ); diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 6727e2d28c8..5cb8407a98f 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -20,7 +20,7 @@ Start a local eth1 ganache server ./ganache_test_node.sh ``` -Assuming you are happy with the configuration in `var.env`, deploy the deposit contract, make deposits, +Assuming you are happy with the configuration in `var.env`, deploy the deposit contract, make deposits, create the testnet directory, genesis state and validator keys with: ```bash diff --git a/scripts/local_testnet/bootnode.sh b/scripts/local_testnet/bootnode.sh index 7ade1f0db58..9558a487bc4 100755 --- a/scripts/local_testnet/bootnode.sh +++ b/scripts/local_testnet/bootnode.sh @@ -30,4 +30,4 @@ exec lighthouse boot_node \ --testnet-dir $TESTNET_DIR \ --port $BOOTNODE_PORT \ --listen-address 127.0.0.1 \ - --network-dir $DATADIR/bootnode \ \ No newline at end of file + --network-dir $DATADIR/bootnode \ diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh index 43a0e0e5dcf..762700dbd63 100755 --- a/scripts/local_testnet/ganache_test_node.sh +++ b/scripts/local_testnet/ganache_test_node.sh @@ -2,12 +2,12 @@ source ./vars.env -ganache-cli \ +exec ganache-cli \ --defaultBalanceEther 1000000000 \ --gasLimit 1000000000 \ --accounts 10 \ --mnemonic "$ETH1_NETWORK_MNEMONIC" \ --port 8545 \ - --blockTime 3 \ + --blockTime $SECONDS_PER_ETH1_BLOCK \ --networkId "$NETWORK_ID" \ --chainId "$NETWORK_ID" diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index 4e86ec88064..a171fb1b085 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -33,7 +33,8 @@ lcli \ --altair-fork-epoch $ALTAIR_FORK_EPOCH \ --eth1-id $NETWORK_ID \ --eth1-follow-distance 1 \ - --seconds-per-eth1-block 1 \ + --seconds-per-slot $SECONDS_PER_SLOT \ + --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ --force echo Specification generated at $TESTNET_DIR. 
diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 98629b4b3bd..6755384be59 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -16,4 +16,5 @@ exec lighthouse \ --datadir $1 \ --testnet-dir $TESTNET_DIR \ --init-slashing-protection \ - --beacon-nodes $2 + --beacon-nodes $2 \ + $VC_ARGS
diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 3152dd49f47..5c2ed22bd67 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -28,3 +28,9 @@ NETWORK_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=18446744073709551615 + +# Seconds per Eth2 slot +SECONDS_PER_SLOT=3 + +# Seconds per Eth1 block +SECONDS_PER_ETH1_BLOCK=1
diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh new file mode 100755 index 00000000000..d10249bdcae --- /dev/null +++ b/scripts/tests/doppelganger_protection.sh @@ -0,0 +1,141 @@ +#!/usr/bin/env bash + +# Requires `lighthouse`, `lcli`, `ganache-cli`, `curl`, `jq` + +BEHAVIOR=$1 + +if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then + echo "Usage: doppelganger_protection.sh [success|failure]" + exit 1 +fi + +source ./vars.env + +../local_testnet/clean.sh + +echo "Starting ganache" + +../local_testnet/ganache_test_node.sh &> /dev/null & +GANACHE_PID=$! + +# Wait for ganache to start +sleep 5 + +echo "Setting up local testnet" + +../local_testnet/setup.sh + +# Duplicate this directory so slashing protection doesn't keep us from re-using validator keys +cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger + +echo "Starting bootnode" + +../local_testnet/bootnode.sh &> /dev/null & +BOOT_PID=$! + +# wait for the bootnode to start +sleep 10 + +echo "Starting local beacon nodes" + +../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null & +BEACON_PID=$! +../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null & +BEACON_PID2=$! +../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null & +BEACON_PID3=$! + +echo "Starting local validator clients" + +../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & +VALIDATOR_1_PID=$! +../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & +VALIDATOR_2_PID=$! +../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & +VALIDATOR_3_PID=$! + +echo "Waiting an epoch before starting the next validator client" +sleep $(( $SECONDS_PER_SLOT * 32 )) + +if [[ "$BEHAVIOR" == "failure" ]]; then + + echo "Starting the doppelganger validator client" + + # Use same keys as keys from VC1, but connect to BN2 + # This process should not last longer than 2 epochs + timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1_doppelganger http://localhost:8100 + DOPPELGANGER_EXIT=$?
+ + echo "Shutting down" + + # Cleanup + kill $BOOT_PID $BEACON_PID $BEACON_PID2 $BEACON_PID3 $GANACHE_PID $VALIDATOR_1_PID $VALIDATOR_2_PID $VALIDATOR_3_PID + + echo "Done" + + if [[ $DOPPELGANGER_EXIT -eq 124 ]]; then + exit 1 + fi +fi + +if [[ "$BEHAVIOR" == "success" ]]; then + + echo "Starting the last validator client" + + ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_4 http://localhost:8100 & + VALIDATOR_4_PID=$! + DOPPELGANGER_FAILURE=0 + + # Sleep three epochs, then make sure all validators were active in epoch 2. Use + # `is_previous_epoch_target_attester` from epoch 3 for a complete view of epoch 2 inclusion. + # + # See: https://lighthouse-book.sigmaprime.io/validator-inclusion.html + echo "Waiting three epochs..." + sleep $(( $SECONDS_PER_SLOT * 32 * 3 )) + + PREVIOUS_DIR=$(pwd) + cd $HOME/.lighthouse/local-testnet/node_4/validators + for val in 0x*; do + [[ -e $val ]] || continue + curl -s localhost:8100/lighthouse/validator_inclusion/3/$val | jq | grep -q '"is_previous_epoch_target_attester": false' + IS_ATTESTER=$? + if [[ $IS_ATTESTER -eq 0 ]]; then + echo "$val did not attest in epoch 2." + else + echo "ERROR! $val did attest in epoch 2." + DOPPELGANGER_FAILURE=1 + fi + done + + # Sleep two epochs, then make sure all validators were active in epoch 4. Use + # `is_previous_epoch_target_attester` from epoch 5 for a complete view of epoch 4 inclusion. + # + # See: https://lighthouse-book.sigmaprime.io/validator-inclusion.html + echo "Waiting two more epochs..." + sleep $(( $SECONDS_PER_SLOT * 32 * 2 )) + for val in 0x*; do + [[ -e $val ]] || continue + curl -s localhost:8100/lighthouse/validator_inclusion/5/$val | jq | grep -q '"is_previous_epoch_target_attester": true' + IS_ATTESTER=$? + if [[ $IS_ATTESTER -eq 0 ]]; then + echo "$val attested in epoch 4." + else + echo "ERROR! $val did not attest in epoch 4." 
+ DOPPELGANGER_FAILURE=1 + fi + done + + echo "Shutting down" + + # Cleanup + cd $PREVIOUS_DIR + kill $BOOT_PID $BEACON_PID $BEACON_PID2 $BEACON_PID3 $GANACHE_PID $VALIDATOR_1_PID $VALIDATOR_2_PID $VALIDATOR_3_PID $VALIDATOR_4_PID + + echo "Done" + + if [[ $DOPPELGANGER_FAILURE -eq 1 ]]; then + exit 1 + fi +fi + +exit 0
diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env new file mode 100644 index 00000000000..7e11393035b --- /dev/null +++ b/scripts/tests/vars.env @@ -0,0 +1,39 @@ +# Base directories for the validator keys and secrets +DATADIR=~/.lighthouse/local-testnet + +# Directory for the eth2 config +TESTNET_DIR=$DATADIR/testnet + +# Mnemonic for the ganache test network +ETH1_NETWORK_MNEMONIC="vast thought differ pull jewel broom cook wrist tribe word before omit" + +# Hardcoded deposit contract based on ETH1_NETWORK_MNEMONIC +DEPOSIT_CONTRACT_ADDRESS=8c594691c0e592ffa21f153a16ae41db5befcaaa + +GENESIS_FORK_VERSION=0x42424242 + +VALIDATOR_COUNT=80 +GENESIS_VALIDATOR_COUNT=80 + +# Number of validator client instances that you intend to run +NODE_COUNT=4 + +GENESIS_DELAY=0 + +# Port for P2P communication with bootnode +BOOTNODE_PORT=4242 + +# Network ID and Chain ID of local eth1 test network +NETWORK_ID=4242 + +# Hard fork configuration +ALTAIR_FORK_EPOCH=18446744073709551615 + +# Seconds per Eth2 slot +SECONDS_PER_SLOT=3 + +# Seconds per Eth1 block +SECONDS_PER_ETH1_BLOCK=1 + +# Enable doppelganger detection +VC_ARGS=" --enable-doppelganger-protection "
diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 0c43ed11371..23a85e7ba80 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -14,6 +14,6 @@ slog = "2.5.2" slot_clock = { path = "../../common/slot_clock" } state_processing = { path = "../../consensus/state_processing" } task_executor = { path = "../../common/task_executor" } -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } tokio-stream = "0.1.3" types = { path = "../../consensus/types" }
diff --git a/slasher/service/src/service.rs b/slasher/service/src/service.rs index 96a157adc7f..227a6344a9a 100644 --- a/slasher/service/src/service.rs +++ b/slasher/service/src/service.rs @@ -214,7 +214,7 @@ impl<T: BeaconChainTypes> SlasherService<T> { // Publish to the network if broadcast is enabled.
if slasher.config().broadcast { if let Err(e) = - Self::publish_attester_slashing(&beacon_chain, &network_sender, slashing) + Self::publish_attester_slashing(beacon_chain, network_sender, slashing) { debug!( log, @@ -267,7 +267,7 @@ impl<T: BeaconChainTypes> SlasherService<T> { if slasher.config().broadcast { if let Err(e) = - Self::publish_proposer_slashing(&beacon_chain, &network_sender, slashing) + Self::publish_proposer_slashing(beacon_chain, network_sender, slashing) { debug!( log,
diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 91cf84fc309..d4e3bf4ca9e 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -244,7 +244,7 @@ impl<E: EthSpec> Slasher<E> { let slashing_status = self.db.check_and_update_attester_record( txn, validator_index, - &attestation, + attestation, attester_record, )?;
diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index b24d4b8686b..26d66cb6c8b 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.1.0-alpha.7 +TESTS_TAG := v1.1.0-beta.2 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS))
diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 5c3275135bc..a3d2a90c8de 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -45,24 +45,6 @@ # LightClientSnapshot "tests/minimal/altair/ssz_static/LightClientSnapshot", "tests/mainnet/altair/ssz_static/LightClientSnapshot", - # ContributionAndProof - "tests/minimal/altair/ssz_static/ContributionAndProof", - "tests/mainnet/altair/ssz_static/ContributionAndProof", - # SignedContributionAndProof - "tests/minimal/altair/ssz_static/SignedContributionAndProof", - "tests/mainnet/altair/ssz_static/SignedContributionAndProof", - # SyncCommitteeContribution - "tests/minimal/altair/ssz_static/SyncCommitteeContribution", - "tests/mainnet/altair/ssz_static/SyncCommitteeContribution", - # SyncCommitteeMessage - "tests/minimal/altair/ssz_static/SyncCommitteeMessage", - "tests/mainnet/altair/ssz_static/SyncCommitteeMessage", - # SyncCommitteeSigningData - "tests/minimal/altair/ssz_static/SyncCommitteeSigningData", - "tests/mainnet/altair/ssz_static/SyncCommitteeSigningData", - # SyncAggregatorSelectionData - "tests/minimal/altair/ssz_static/SyncAggregatorSelectionData", - "tests/mainnet/altair/ssz_static/SyncAggregatorSelectionData", # Fork choice "tests/mainnet/phase0/fork_choice", "tests/minimal/phase0/fork_choice",
diff --git a/testing/ef_tests/src/case_result.rs b/testing/ef_tests/src/case_result.rs index f20d14836bd..4982bf94c1f 100644 --- a/testing/ef_tests/src/case_result.rs +++ b/testing/ef_tests/src/case_result.rs @@ -41,7 +41,7 @@ pub fn compare_beacon_state_results_without_caches<T: EthSpec>( expected.drop_all_caches().unwrap(); } - compare_result_detailed(&result, &expected) + compare_result_detailed(result, expected) } /// Same as `compare_result`, however utilizes the `CompareFields` trait to give a list of
diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 8ca3775f06d..56e6c9b7bca 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -5,7 +5,6 @@ use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; -use state_processing::per_epoch_processing::validator_statuses::ValidatorStatuses; use
state_processing::per_epoch_processing::{ altair, base, effective_balance_updates::process_effective_balance_updates, @@ -87,7 +86,7 @@ impl<E: EthSpec> EpochTransition<E> for JustificationAndFinalization { fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => { - let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; validator_statuses.process_attestations(state)?; base::process_justification_and_finalization( state, @@ -95,7 +94,10 @@ impl<E: EthSpec> EpochTransition<E> for JustificationAndFinalization { spec, ) } - BeaconState::Altair(_) => altair::process_justification_and_finalization(state, spec), + BeaconState::Altair(_) => altair::process_justification_and_finalization( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + ), } } } @@ -104,11 +106,15 @@ impl<E: EthSpec> EpochTransition<E> for RewardsAndPenalties { fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => { - let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &mut validator_statuses, spec) } - BeaconState::Altair(_) => altair::process_rewards_and_penalties(state, spec), + BeaconState::Altair(_) => altair::process_rewards_and_penalties( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } } @@ -123,8 +129,8 @@ impl<E: EthSpec> EpochTransition<E> for Slashings { fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => { - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state)?; + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; process_slashings( state, validator_statuses.total_balances.current_epoch(), @@ -135,7 +141,9 @@ impl<E: EthSpec> EpochTransition<E> for Slashings { BeaconState::Altair(_) => { process_slashings( state, - state.get_total_active_balance(spec)?, + altair::ParticipationCache::new(state, spec) + .unwrap() + .current_epoch_total_active_balance(), spec.proportional_slashing_multiplier_altair, spec, )?; @@ -198,7 +206,11 @@ impl<E: EthSpec> EpochTransition<E> for InactivityUpdates { fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) => altair::process_inactivity_updates(state, spec), + BeaconState::Altair(_) => altair::process_inactivity_updates( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } }
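The recurring pattern in the hunks above is worth spelling out: Altair epoch processing now derives participation once per state and threads the cache through each stage, rather than each stage recomputing it. A condensed sketch, assuming `state` and `spec` are in scope inside a function returning `Result<(), EpochProcessingError>`:

```rust
// Build the cache once per epoch transition...
let cache = altair::ParticipationCache::new(state, spec).unwrap();
// ...then reuse it across the stages, matching the call shapes above.
altair::process_justification_and_finalization(state, &cache)?;
altair::process_inactivity_updates(state, &cache, spec)?;
altair::process_rewards_and_penalties(state, &cache, spec)?;
```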
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 0f63d4eb0b8..293195662db 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -67,13 +67,19 @@ impl<E: EthSpec> Operation<E> for Attestation<E> { state: &mut BeaconState<E>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64; match state { BeaconState::Base(_) => { base::process_attestations(state, &[self.clone()], VerifySignatures::True, spec) } - BeaconState::Altair(_) => { - altair::process_attestation(state, self, 0, VerifySignatures::True, spec) - } + BeaconState::Altair(_) => altair::process_attestation( + state, + self, + 0, + proposer_index, + VerifySignatures::True, + spec, + ), } } } @@ -192,7 +198,7 @@ impl<E: EthSpec> Operation<E> for SyncAggregate<E> { spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64; - process_sync_aggregate(state, self, proposer_index, spec) + process_sync_aggregate(state, self, proposer_index, VerifySignatures::True, spec) } }
diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index df9c1766199..0cdde4c32a3 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -4,11 +4,10 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use compare_fields_derive::CompareFields; use serde_derive::Deserialize; use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing::validator_statuses::ValidatorStatuses; use state_processing::{ per_epoch_processing::{ - altair::{self, rewards_and_penalties::get_flag_index_deltas}, - base::{self, rewards_and_penalties::AttestationDelta}, + altair::{self, rewards_and_penalties::get_flag_index_deltas, ParticipationCache}, + base::{self, rewards_and_penalties::AttestationDelta, ValidatorStatuses}, Delta, }, EpochProcessingError, @@ -54,7 +53,7 @@ type Accessor = fn(&AttestationDelta) -> &Delta; fn load_optional_deltas_file(path: &Path) -> Result<Option<Deltas>, Error> { let deltas = if path.is_file() { - Some(ssz_decode_file(&path)?) + Some(ssz_decode_file(path)?)
} else { None }; @@ -187,7 +186,14 @@ fn compute_altair_flag_deltas( spec: &ChainSpec, ) -> Result<Deltas, Error> { let mut deltas = vec![Delta::default(); state.validators().len()]; - get_flag_index_deltas(&mut deltas, state, flag_index, total_active_balance, spec)?; + get_flag_index_deltas( + &mut deltas, + state, + flag_index, + total_active_balance, + &ParticipationCache::new(state, spec).unwrap(), + spec, + )?; Ok(convert_altair_deltas(deltas)) } @@ -196,7 +202,12 @@ fn compute_altair_inactivity_deltas( spec: &ChainSpec, ) -> Result<Deltas, Error> { let mut deltas = vec![Delta::default(); state.validators().len()]; - altair::rewards_and_penalties::get_inactivity_penalty_deltas(&mut deltas, state, spec)?; + altair::rewards_and_penalties::get_inactivity_penalty_deltas( + &mut deltas, + state, + &ParticipationCache::new(state, spec).unwrap(), + spec, + )?; Ok(convert_altair_deltas(deltas)) }
diff --git a/testing/ef_tests/src/results.rs b/testing/ef_tests/src/results.rs index 4f5513a9ae3..c3f17b54f9b 100644 --- a/testing/ef_tests/src/results.rs +++ b/testing/ef_tests/src/results.rs @@ -11,7 +11,7 @@ pub fn assert_tests_pass(handler_name: &str, path: &Path, results: &[CaseResult] &failed, &skipped_bls, &skipped_known_failures, - &results, + results, ); if !failed.is_empty() { panic!("Tests failed (see above)"); }
diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index ed5ef8d1fdc..6576a2fb26d 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -51,6 +51,7 @@ type_name_generic!(BeaconState); type_name_generic!(BeaconStateBase, "BeaconState"); type_name_generic!(BeaconStateAltair, "BeaconState"); type_name!(Checkpoint); +type_name_generic!(ContributionAndProof); type_name!(Deposit); type_name!(DepositData); type_name!(DepositMessage); @@ -64,8 +65,12 @@ type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); type_name_generic!(SignedBeaconBlock); type_name!(SignedBeaconBlockHeader); +type_name_generic!(SignedContributionAndProof); type_name!(SignedVoluntaryExit); type_name!(SigningData); +type_name_generic!(SyncCommitteeContribution); +type_name!(SyncCommitteeMessage); +type_name!(SyncAggregatorSelectionData); type_name_generic!(SyncAggregate); type_name_generic!(SyncCommittee); type_name!(Validator);
diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 84168eb5a4d..59c70d6c26d 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -216,6 +216,20 @@ mod ssz_static { } // Altair-only + #[test] + fn contribution_and_proof() { + SszStaticHandler::<ContributionAndProof<MinimalEthSpec>, MinimalEthSpec>::altair_only() + .run(); + SszStaticHandler::<ContributionAndProof<MainnetEthSpec>, MainnetEthSpec>::altair_only() + .run(); + } + + #[test] + fn signed_contribution_and_proof() { + SszStaticHandler::<SignedContributionAndProof<MinimalEthSpec>, MinimalEthSpec>::altair_only().run(); + SszStaticHandler::<SignedContributionAndProof<MainnetEthSpec>, MainnetEthSpec>::altair_only().run(); + } + #[test] fn sync_aggregate() { SszStaticHandler::<SyncAggregate<MinimalEthSpec>, MinimalEthSpec>::altair_only().run(); @@ -227,6 +241,28 @@ SszStaticHandler::<SyncCommittee<MinimalEthSpec>, MinimalEthSpec>::altair_only().run(); SszStaticHandler::<SyncCommittee<MainnetEthSpec>, MainnetEthSpec>::altair_only().run(); } + + #[test] + fn sync_committee_contribution() { + SszStaticHandler::<SyncCommitteeContribution<MinimalEthSpec>, MinimalEthSpec>::altair_only( + ) + .run(); + SszStaticHandler::<SyncCommitteeContribution<MainnetEthSpec>, MainnetEthSpec>::altair_only( + ) + .run(); + } + + #[test] + fn sync_committee_message() { + SszStaticHandler::<SyncCommitteeMessage, MinimalEthSpec>::altair_only().run(); + SszStaticHandler::<SyncCommitteeMessage, MainnetEthSpec>::altair_only().run(); + } + + #[test] + fn sync_aggregator_selection_data() { + SszStaticHandler::<SyncAggregatorSelectionData, MinimalEthSpec>::altair_only().run(); +
SszStaticHandler::<SyncAggregatorSelectionData, MainnetEthSpec>::altair_only().run(); + } } #[test]
diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index 73f96bff561..12286735c58 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2018" [dependencies] -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } tokio-compat-02 = "0.2.0" web3 = { version = "0.16.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } futures = "0.3.7"
diff --git a/testing/remote_signer_test/Cargo.toml b/testing/remote_signer_test/Cargo.toml index 0fbb4b106ec..43cd2e13c1b 100644 --- a/testing/remote_signer_test/Cargo.toml +++ b/testing/remote_signer_test/Cargo.toml @@ -15,7 +15,7 @@ reqwest = { version = "0.11.0", features = ["blocking", "json"] } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" tempfile = "3.1.0" -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } types = { path = "../../consensus/types" } sensitive_url = { path = "../../common/sensitive_url" }
diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 6733ee6327c..038f8f72b6f 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -13,7 +13,7 @@ types = { path = "../../consensus/types" } validator_client = { path = "../../validator_client" } parking_lot = "0.11.0" futures = "0.3.7" -tokio = "1.1.0" +tokio = "1.7.1" eth1_test_rig = { path = "../eth1_test_rig" } env_logger = "0.8.2" clap = "2.33.3"
diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 09ab132bc3e..2eda987d497 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -125,7 +125,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { network.clone(), Epoch::new(4).start_slot(MainnetEthSpec::slots_per_epoch()), slot_duration, - ) + ), ); finalization?; block_prod?;
diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index df74a203a45..801a0954b03 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -9,7 +9,7 @@ name = "validator_client" path = "src/lib.rs" [dev-dependencies] -tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] } +tokio = { version = "1.7.1", features = ["time", "rt-multi-thread", "macros"] } deposit_contract = { path = "../common/deposit_contract" } [dependencies] @@ -30,7 +30,7 @@ serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } slog-async = "2.5.0" slog-term = "2.6.0" -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } futures = "0.3.7" dirs = "3.0.1" directory = { path = "../common/directory" } @@ -57,7 +57,7 @@ warp_utils = { path = "../common/warp_utils" } warp = { git = "https://github.com/paulhauner/warp ", branch = "cors-wildcard" } hyper = "0.14.4" serde_utils = { path = "../consensus/serde_utils" } -libsecp256k1 = "0.3.5" +libsecp256k1 = "0.5.0" ring = "0.16.19" rand = "0.7.3" scrypt = { version = "0.5.0", default-features = false } @@ -66,3 +66,4 @@ lazy_static = "1.4.0" fallback = { path = "../common/fallback" } monitoring_api = { path = "../common/monitoring_api" } sensitive_url = { path = "../common/sensitive_url" } +task_executor = { path = "../common/task_executor" }
diff --git
a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 3222a95cbfb..e7fcb0ffae0 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -93,7 +93,7 @@ impl SlashingDatabase { /// Open an existing `SlashingDatabase` from disk. pub fn open(path: &Path) -> Result { - let conn_pool = Self::open_conn_pool(&path)?; + let conn_pool = Self::open_conn_pool(path)?; Ok(Self { conn_pool }) } @@ -159,7 +159,7 @@ impl SlashingDatabase { ) -> Result<(), NotSafe> { let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?; for pubkey in public_keys { - if self.get_validator_id_opt(&txn, pubkey)?.is_none() { + if self.get_validator_id_opt(txn, pubkey)?.is_none() { stmt.execute(&[pubkey.as_hex_string()])?; } } @@ -481,10 +481,10 @@ impl SlashingDatabase { signing_root: SigningRoot, txn: &Transaction, ) -> Result { - let safe = self.check_block_proposal(&txn, validator_pubkey, slot, signing_root)?; + let safe = self.check_block_proposal(txn, validator_pubkey, slot, signing_root)?; if safe != Safe::SameData { - self.insert_block_proposal(&txn, validator_pubkey, slot, signing_root)?; + self.insert_block_proposal(txn, validator_pubkey, slot, signing_root)?; } Ok(safe) } @@ -541,7 +541,7 @@ impl SlashingDatabase { txn: &Transaction, ) -> Result { let safe = self.check_attestation( - &txn, + txn, validator_pubkey, att_source_epoch, att_target_epoch, @@ -550,7 +550,7 @@ impl SlashingDatabase { if safe != Safe::SameData { self.insert_attestation( - &txn, + txn, validator_pubkey, att_source_epoch, att_target_epoch, @@ -695,7 +695,7 @@ impl SlashingDatabase { .query_and_then(params![], |row| { let validator_pubkey: String = row.get(0)?; let slot = row.get(1)?; - let signing_root = Some(hash256_from_row(2, &row)?); + let signing_root = Some(hash256_from_row(2, row)?); let signed_block = InterchangeBlock { slot, signing_root }; data.entry(validator_pubkey) .or_insert_with(|| (vec![], vec![])) @@ -715,7 +715,7 @@ impl SlashingDatabase { let validator_pubkey: String = row.get(0)?; let source_epoch = row.get(1)?; let target_epoch = row.get(2)?; - let signing_root = Some(hash256_from_row(3, &row)?); + let signing_root = Some(hash256_from_row(3, row)?); let signed_attestation = InterchangeAttestation { source_epoch, target_epoch, diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index af016f7cf4b..50f127db5bf 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -20,7 +20,7 @@ use types::{ /// Builds an `AttestationService`. pub struct AttestationServiceBuilder { duties_service: Option>>, - validator_store: Option>, + validator_store: Option>>, slot_clock: Option, beacon_nodes: Option>>, context: Option>, @@ -42,7 +42,7 @@ impl AttestationServiceBuilder { self } - pub fn validator_store(mut self, store: ValidatorStore) -> Self { + pub fn validator_store(mut self, store: Arc>) -> Self { self.validator_store = Some(store); self } @@ -88,7 +88,7 @@ impl AttestationServiceBuilder { /// Helper to minimise `Arc` usage. 
pub struct Inner { duties_service: Arc>, - validator_store: ValidatorStore, + validator_store: Arc>, slot_clock: T, beacon_nodes: Arc>, context: RuntimeContext, @@ -377,25 +377,22 @@ impl AttestationService { signature: AggregateSignature::infinity(), }; - if self - .validator_store - .sign_attestation( - &duty.pubkey, - duty.validator_committee_index as usize, - &mut attestation, - current_epoch, - ) - .is_some() - { - attestations.push(attestation); - } else { + if let Err(e) = self.validator_store.sign_attestation( + duty.pubkey, + duty.validator_committee_index as usize, + &mut attestation, + current_epoch, + ) { crit!( log, "Failed to sign attestation"; + "error" => ?e, "committee_index" => committee_index, "slot" => slot.as_u64(), ); continue; + } else { + attestations.push(attestation); } } @@ -497,17 +494,22 @@ impl AttestationService { continue; } - if let Some(aggregate) = self.validator_store.produce_signed_aggregate_and_proof( - &duty.pubkey, + match self.validator_store.produce_signed_aggregate_and_proof( + duty.pubkey, duty.validator_index, aggregated_attestation.clone(), selection_proof.clone(), ) { - signed_aggregate_and_proofs.push(aggregate); - } else { - crit!(log, "Failed to sign attestation"); - continue; - }; + Ok(aggregate) => signed_aggregate_and_proofs.push(aggregate), + Err(e) => { + crit!( + log, + "Failed to sign attestation"; + "error" => ?e + ); + continue; + } + } } if !signed_aggregate_and_proofs.is_empty() { diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 091a89c634c..f102df18b1e 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -5,7 +5,6 @@ use crate::{ use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; use eth2::types::Graffiti; -use futures::TryFutureExt; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; @@ -15,7 +14,7 @@ use types::{EthSpec, PublicKeyBytes, Slot}; /// Builds a `BlockService`. pub struct BlockServiceBuilder { - validator_store: Option>, + validator_store: Option>>, slot_clock: Option>, beacon_nodes: Option>>, context: Option>, @@ -35,7 +34,7 @@ impl BlockServiceBuilder { } } - pub fn validator_store(mut self, store: ValidatorStore) -> Self { + pub fn validator_store(mut self, store: Arc>) -> Self { self.validator_store = Some(store); self } @@ -89,7 +88,7 @@ impl BlockServiceBuilder { /// Helper to minimise `Arc` usage. pub struct Inner { - validator_store: ValidatorStore, + validator_store: Arc>, slot_clock: Arc, beacon_nodes: Arc>, context: RuntimeContext, @@ -207,15 +206,15 @@ impl BlockService { let service = self.clone(); let log = log.clone(); self.inner.context.executor.spawn( - service - .publish_block(slot, validator_pubkey) - .unwrap_or_else(move |e| { + async move { + if let Err(e) = service.publish_block(slot, validator_pubkey).await { crit!( log, "Error whilst producing block"; "message" => e ); - }), + } + }, "block service", ); } @@ -240,8 +239,8 @@ impl BlockService { let randao_reveal = self .validator_store - .randao_reveal(&validator_pubkey, slot.epoch(E::slots_per_epoch())) - .ok_or("Unable to produce randao reveal")? + .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) + .map_err(|e| format!("Unable to produce randao reveal signature: {:?}", e))? 
.into();

         let graffiti = self
@@ -276,8 +275,8 @@ impl BlockService {
         let signed_block = self_ref
             .validator_store
-            .sign_block(validator_pubkey_ref, block, current_slot)
-            .ok_or("Unable to sign block")?;
+            .sign_block(*validator_pubkey_ref, block, current_slot)
+            .map_err(|e| format!("Unable to sign block: {:?}", e))?;

         let _post_timer = metrics::start_timer_vec(
             &metrics::BLOCK_SERVICE_TIMES,
diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs
index 08d3d9ae56e..386a20bf069 100644
--- a/validator_client/src/cli.rs
+++ b/validator_client/src/cli.rs
@@ -125,23 +125,36 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .takes_value(false),
         )
         /*
-         * Note: there is purposefully no `--http-address` flag provided.
+         * Note: The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is
+         * unsafe to publish on a public network.
          *
-         * The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is unsafe to
-         * publish on a public network.
-         *
-         * We restrict the user to `127.0.0.1` and they must provide some other transport-layer
-         * encryption (e.g., SSH tunnels).
+         * If the `--http-address` flag is used, the `--unencrypted-http-transport` flag
+         * must also be used in order to make it clear to the user that this is unsafe.
          */
+        .arg(
+            Arg::with_name("http-address")
+                .long("http-address")
+                .value_name("ADDRESS")
+                .help("Set the address for the HTTP server. The HTTP server is not encrypted \
+                    and therefore it is unsafe to publish on a public network. When this \
+                    flag is used, it additionally requires the explicit use of the \
+                    `--unencrypted-http-transport` flag to ensure the user is aware of the \
+                    risks involved. For access via the Internet, users should apply \
+                    transport-layer security like an HTTPS reverse-proxy or SSH tunnelling.")
+                .requires("unencrypted-http-transport"),
+        )
+        .arg(
+            Arg::with_name("unencrypted-http-transport")
+                .long("unencrypted-http-transport")
+                .help("This is a safety flag to ensure that the user is aware that the HTTP \
+                    transport is unencrypted and using a custom HTTP address is unsafe.")
+                .requires("http-address"),
+        )
         .arg(
             Arg::with_name("http-port")
                 .long("http-port")
                 .value_name("PORT")
-                .help("Set the listen TCP port for the RESTful HTTP API server. This server does **not** \
-                    provide encryption and is completely unsuitable to expose to a public network. \
-                    We do not provide a --http-address flag and restrict the user to listening on \
-                    127.0.0.1. For access via the Internet, apply a transport-layer security like \
-                    a HTTPS reverse-proxy or SSH tunnelling.")
+                .help("Set the listen TCP port for the RESTful HTTP API server.")
                 .default_value("5062")
                 .takes_value(true),
         )
@@ -203,4 +216,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 and never provide an untrusted URL.")
                 .takes_value(true),
         )
+        .arg(
+            Arg::with_name("enable-doppelganger-protection")
+                .long("enable-doppelganger-protection")
+                .value_name("ENABLE_DOPPELGANGER_PROTECTION")
+                .help("If this flag is set, Lighthouse will delay startup for three epochs and \
+                    monitor for messages on the network by any of the validators managed by this \
+                    client. This will result in three (possibly four) epochs worth of missed \
+                    attestations. If an attestation is detected during this period, it means it is \
+                    very likely that you are running a second validator client with the same keys. \
+                    This validator client will immediately shut down if this is detected in order \
+                    to avoid potentially committing a slashable offense.
Use this flag in order to \
+                    ENABLE this functionality; without this flag, Lighthouse will begin attesting \
+                    immediately.")
+                .takes_value(false),
+        )
 }
diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs
index b299247de20..06f91e2bb93 100644
--- a/validator_client/src/config.rs
+++ b/validator_client/src/config.rs
@@ -47,6 +47,9 @@ pub struct Config {
     pub http_metrics: http_metrics::Config,
     /// Configuration for sending metrics to a remote explorer endpoint.
     pub monitoring_api: Option<monitoring_api::Config>,
+    /// If true, enable functionality that monitors the network for attestations or proposals from
+    /// any of the validators managed by this client before starting up.
+    pub enable_doppelganger_protection: bool,
 }

 impl Default for Config {
@@ -76,6 +79,7 @@ impl Default for Config {
             http_api: <_>::default(),
             http_metrics: <_>::default(),
             monitoring_api: None,
+            enable_doppelganger_protection: false,
         }
     }
 }
@@ -183,7 +187,7 @@ impl Config {
             // Copy the provided bytes over.
             //
             // Panic-free because `graffiti_bytes.len()` <= `GRAFFITI_BYTES_LEN`.
-            graffiti[..graffiti_bytes.len()].copy_from_slice(&graffiti_bytes);
+            graffiti[..graffiti_bytes.len()].copy_from_slice(graffiti_bytes);

             config.graffiti = Some(graffiti.into());
         }
@@ -197,6 +201,19 @@ impl Config {
             config.http_api.enabled = true;
         }

+        if let Some(address) = cli_args.value_of("http-address") {
+            if cli_args.is_present("unencrypted-http-transport") {
+                config.http_api.listen_addr = address
+                    .parse::<Ipv4Addr>()
+                    .map_err(|_| "http-address is not a valid IPv4 address.")?;
+            } else {
+                return Err(
+                    "While using `--http-address`, you must also use `--unencrypted-http-transport`."
+                        .to_string(),
+                );
+            }
+        }
+
         if let Some(port) = cli_args.value_of("http-port") {
             config.http_api.listen_port = port
                 .parse::<u16>()
@@ -251,6 +268,10 @@ impl Config {
             });
         }

+        if cli_args.is_present("enable-doppelganger-protection") {
+            config.enable_doppelganger_protection = true;
+        }
+
         Ok(config)
     }
 }
diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs
new file mode 100644
index 00000000000..1281be00b7b
--- /dev/null
+++ b/validator_client/src/doppelganger_service.rs
@@ -0,0 +1,1325 @@
+//! The "Doppelganger" service is an **imperfect** mechanism to try to prevent the validator client
+//! from starting whilst any of its validators are actively producing messages on the network.
+//!
+//! The mechanism works roughly like so: when the validator client starts or a new validator is
+//! added, that validator is assigned a number of "remaining epochs". The doppelganger service
+//! periodically polls the beacon node to see if that validator has been observed to produce
+//! blocks/attestations in each epoch. After the doppelganger service is confident that an epoch has
+//! passed without observing that validator, it will decrease the remaining epochs by one. Once the
+//! remaining epochs reach zero, the doppelganger will consider that validator to be safe enough to
+//! start.
+//!
+//! If a doppelganger is detected, the entire validator client will exit.
+//!
+//! For validators started during the genesis epoch, there is **no doppelganger protection!** This
+//! prevents a stalemate where all validators would cease to function for a few epochs and then all
+//! start at the same time.
+//!
+//! ## Caveat
+//!
+//! Presently, doppelganger protection will never advance if the liveness call at the last slot of
+//! each epoch fails. This call is critical to ensuring that validators are eventually able to
+//! start performing their duties.
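+//!
+//! ## Example
+//!
+//! An illustrative sketch of the state transitions described above (`epoch_n` stands in for
+//! whichever epoch the validator was registered in; the types are defined later in this file):
+//!
+//! ```ignore
+//! let mut state = DoppelgangerState {
+//!     next_check_epoch: epoch_n + 1,
+//!     remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS,
+//! };
+//!
+//! // A full epoch passes without the beacon node observing any liveness for the validator.
+//! state.complete_detection_in_epoch(epoch_n + 1);
+//!
+//! // With the default of one detection epoch, the validator may now start signing.
+//! assert!(!state.requires_further_checks());
+//! ```
+//!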
+//! ## Disclaimer
+//!
+//! The Doppelganger service is not perfect. It assumes that any existing validator is
+//! performing their duties as required and that the network is able to relay those messages to the
+//! beacon node. Among other loopholes, two validator clients started at the same time will not
+//! detect each other.
+//!
+//! Doppelganger protection is a best-effort, last-line-of-defence mitigation. Do not rely upon it.
+
+use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced};
+use crate::validator_store::ValidatorStore;
+use environment::RuntimeContext;
+use eth2::types::LivenessResponseData;
+use parking_lot::RwLock;
+use slog::{crit, error, info, Logger};
+use slot_clock::SlotClock;
+use std::collections::HashMap;
+use std::future::Future;
+use std::sync::Arc;
+use task_executor::ShutdownReason;
+use tokio::time::sleep;
+use types::{Epoch, EthSpec, PublicKeyBytes, Slot};
+
+/// A wrapper around `PublicKeyBytes` which encodes information about the status of a validator
+/// pubkey with regard to doppelganger protection.
+#[derive(Debug, PartialEq)]
+pub enum DoppelgangerStatus {
+    /// Doppelganger protection has approved this for signing.
+    ///
+    /// This is because the service has waited some period of time to
+    /// detect other instances of this key on the network.
+    SigningEnabled(PublicKeyBytes),
+    /// Doppelganger protection is still waiting to detect other instances.
+    ///
+    /// Do not use this pubkey for signing slashable messages!!
+    ///
+    /// However, it can safely be used for other non-slashable operations (e.g., collecting duties
+    /// or subscribing to subnets).
+    SigningDisabled(PublicKeyBytes),
+    /// This pubkey is unknown to the doppelganger service.
+    ///
+    /// This represents a serious internal error in the program. This validator will be permanently
+    /// disabled!
+    UnknownToDoppelganger(PublicKeyBytes),
+}
+
+impl DoppelgangerStatus {
+    /// Only return a pubkey if it is explicitly safe for doppelganger protection.
+    ///
+    /// If `Some(pubkey)` is returned, doppelganger has declared it safe for signing.
+    ///
+    /// ## Note
+    ///
+    /// "Safe" is only best-effort by doppelganger. There is no guarantee that a doppelganger
+    /// doesn't exist.
+    pub fn only_safe(self) -> Option<PublicKeyBytes> {
+        match self {
+            DoppelgangerStatus::SigningEnabled(pubkey) => Some(pubkey),
+            DoppelgangerStatus::SigningDisabled(_) => None,
+            DoppelgangerStatus::UnknownToDoppelganger(_) => None,
+        }
+    }
+
+    /// Returns a key regardless of whether or not doppelganger has approved it. Such a key might be
+    /// used for signing non-slashable messages, duties collection, or other activities.
+    ///
+    /// If the validator is unknown to doppelganger then `None` will be returned.
+    pub fn ignored(self) -> Option<PublicKeyBytes> {
+        match self {
+            DoppelgangerStatus::SigningEnabled(pubkey) => Some(pubkey),
+            DoppelgangerStatus::SigningDisabled(pubkey) => Some(pubkey),
+            DoppelgangerStatus::UnknownToDoppelganger(_) => None,
+        }
+    }
+
+    /// Only return a pubkey if it will not be used for signing due to doppelganger detection.
+    pub fn only_unsafe(self) -> Option<PublicKeyBytes> {
+        match self {
+            DoppelgangerStatus::SigningEnabled(_) => None,
+            DoppelgangerStatus::SigningDisabled(pubkey) => Some(pubkey),
+            DoppelgangerStatus::UnknownToDoppelganger(pubkey) => Some(pubkey),
+        }
+    }
+}
+
+struct LivenessResponses {
+    current_epoch_responses: Vec<LivenessResponseData>,
+    previous_epoch_responses: Vec<LivenessResponseData>,
+}
+
+/// The number of epochs that must be checked before we assume that there are no other duplicate
+/// validators on the network.
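+///
+/// With the default of `1`, a validator registered in epoch `N` must observe a clean epoch
+/// `N + 1` and only has signing enabled at the last slot of epoch `N + 2` (see
+/// `no_doppelgangers_for_adequate_time` in the tests below).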
+pub const DEFAULT_REMAINING_DETECTION_EPOCHS: u64 = 1;
+
+/// Store the per-validator status of doppelganger checking.
+#[derive(Debug, PartialEq)]
+pub struct DoppelgangerState {
+    /// The next epoch for which the validator should be checked for liveness.
+    ///
+    /// Whilst `self.remaining_epochs > 0`, if a validator is found to be live in this epoch or any
+    /// following epoch then we consider them to have an active doppelganger.
+    ///
+    /// Regardless of `self.remaining_epochs`, never indicate a doppelganger for epochs that are
+    /// below `next_check_epoch`. This is to avoid the scenario where a user reboots their VC inside
+    /// a single epoch and we detect the activity of that previous process as doppelganger activity,
+    /// even when it's not running anymore.
+    next_check_epoch: Epoch,
+    /// The number of epochs that must be checked before this validator is considered
+    /// doppelganger-free.
+    remaining_epochs: u64,
+}
+
+impl DoppelgangerState {
+    /// Returns `true` if the validator is *not* safe to sign.
+    fn requires_further_checks(&self) -> bool {
+        self.remaining_epochs > 0
+    }
+
+    /// Updates the `DoppelgangerState` to consider the given `Epoch`'s doppelganger checks
+    /// completed.
+    fn complete_detection_in_epoch(&mut self, epoch: Epoch) {
+        // The validator has successfully completed doppelganger checks for a new epoch.
+        self.remaining_epochs = self.remaining_epochs.saturating_sub(1);
+
+        // Since we just satisfied the `previous_epoch`, the next epoch to satisfy should be
+        // the one following that.
+        self.next_check_epoch = epoch.saturating_add(1_u64);
+    }
+}
+
+/// Perform two requests to the BN to obtain the liveness data for `validator_indices`. One
+/// request will pertain to the `current_epoch`, the other to the `previous_epoch`.
+///
+/// If the BN fails to respond to either of these requests, simply return an empty response.
+/// This behaviour is to help prevent spurious failures on the BN from needlessly preventing
+/// doppelganger progression.
+async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>(
+    beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
+    log: Logger,
+    current_epoch: Epoch,
+    validator_indices: Vec<u64>,
+) -> LivenessResponses {
+    let validator_indices = validator_indices.as_slice();
+
+    let previous_epoch = current_epoch.saturating_sub(1_u64);
+
+    let previous_epoch_responses = if previous_epoch == current_epoch {
+        // If the previous epoch and the current epoch are the same, don't bother requesting the
+        // previous epoch indices.
+        //
+        // In such a scenario it will be possible to detect doppelgangers but we will never update
+        // any of the doppelganger states.
+        vec![]
+    } else {
+        // Request the previous epoch liveness state from the beacon node.
+        beacon_nodes
+            .first_success(RequireSynced::Yes, |beacon_node| async move {
+                beacon_node
+                    .post_lighthouse_liveness(validator_indices, previous_epoch)
+                    .await
+                    .map_err(|e| format!("Failed query for validator liveness: {:?}", e))
+                    .map(|result| result.data)
+            })
+            .await
+            .unwrap_or_else(|e| {
+                crit!(
+                    log,
+                    "Failed previous epoch liveness query";
+                    "error" => %e,
+                    "previous_epoch" => %previous_epoch,
+                );
+                // Return an empty vec. In effect, this means to keep trying to make doppelganger
+                // progress even if some of the calls are failing.
+                vec![]
+            })
+    };
+
+    // Request the current epoch liveness state from the beacon node.
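+    // Unlike the previous-epoch responses, these are only used to spot violators; they never
+    // advance a `DoppelgangerState` (see `process_liveness_responses`).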
+    let current_epoch_responses = beacon_nodes
+        .first_success(RequireSynced::Yes, |beacon_node| async move {
+            beacon_node
+                .post_lighthouse_liveness(validator_indices, current_epoch)
+                .await
+                .map_err(|e| format!("Failed query for validator liveness: {:?}", e))
+                .map(|result| result.data)
+        })
+        .await
+        .unwrap_or_else(|e| {
+            crit!(
+                log,
+                "Failed current epoch liveness query";
+                "error" => %e,
+                "current_epoch" => %current_epoch,
+            );
+            // Return an empty vec. In effect, this means to keep trying to make doppelganger
+            // progress even if some of the calls are failing.
+            vec![]
+        });
+
+    // Alert the user if the beacon node is omitting validators from the response.
+    //
+    // This is not perfect since the beacon node might return duplicate entries, but it's a quick
+    // and easy way to detect issues.
+    if validator_indices.len() != current_epoch_responses.len()
+        || current_epoch_responses.len() != previous_epoch_responses.len()
+    {
+        error!(
+            log,
+            "Liveness query omitted validators";
+            "previous_epoch_response" => previous_epoch_responses.len(),
+            "current_epoch_response" => current_epoch_responses.len(),
+            "requested" => validator_indices.len(),
+        )
+    }
+
+    LivenessResponses {
+        current_epoch_responses,
+        previous_epoch_responses,
+    }
+}
+
+pub struct DoppelgangerService {
+    doppelganger_states: RwLock<HashMap<PublicKeyBytes, DoppelgangerState>>,
+    log: Logger,
+}
+
+impl DoppelgangerService {
+    pub fn new(log: Logger) -> Self {
+        Self {
+            doppelganger_states: <_>::default(),
+            log,
+        }
+    }
+
+    /// Starts a recurring future which will try to keep the doppelganger service updated each
+    /// slot.
+    pub fn start_update_service<E: EthSpec, T: 'static + SlotClock>(
+        service: Arc<Self>,
+        context: RuntimeContext<E>,
+        validator_store: Arc<ValidatorStore<T, E>>,
+        beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
+        slot_clock: T,
+    ) -> Result<(), String> {
+        // Define the `get_index` function as one that uses the validator store.
+        let get_index = move |pubkey| validator_store.validator_index(&pubkey);
+
+        // Define the `get_liveness` function as one that queries the beacon node API.
+        let log = service.log.clone();
+        let get_liveness = move |current_epoch, validator_indices| {
+            beacon_node_liveness(
+                beacon_nodes.clone(),
+                log.clone(),
+                current_epoch,
+                validator_indices,
+            )
+        };
+
+        let mut shutdown_sender = context.executor.shutdown_sender();
+        let log = service.log.clone();
+        let mut shutdown_func = move || {
+            if let Err(e) =
+                shutdown_sender.try_send(ShutdownReason::Failure("Doppelganger detected."))
+            {
+                crit!(
+                    log,
+                    "Failed to send shutdown signal";
+                    "msg" => "terminate this process immediately",
+                    "error" => ?e
+                );
+            }
+        };
+
+        info!(
+            service.log,
+            "Doppelganger detection service started";
+        );
+
+        context.executor.spawn(
+            async move {
+                loop {
+                    let slot_duration = slot_clock.slot_duration();
+
+                    if let Some(duration_to_next_slot) = slot_clock.duration_to_next_slot() {
+                        // Run the doppelganger protection check 75% through each slot. This
+                        // *should* mean that the BN has seen the blocks and attestations for this
+                        // slot.
+                        sleep(duration_to_next_slot + (slot_duration / 4) * 3).await;
+                    } else {
+                        // Just sleep for one slot if we are unable to read the system clock; this
+                        // gives us an opportunity for the clock to eventually come good.
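+                        //
+                        // Skipping the detection check below is safe: doppelganger states only
+                        // ever advance, so a missed iteration merely delays signing enablement.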
+ sleep(slot_duration).await; + continue; + } + + if let Some(slot) = slot_clock.now() { + if let Err(e) = service + .detect_doppelgangers::( + slot, + &get_index, + &get_liveness, + &mut shutdown_func, + ) + .await + { + error!( + service.log, + "Error during doppelganger detection"; + "error" => ?e + ); + } + } + } + }, + "doppelganger_service", + ); + Ok(()) + } + + /// Returns the current status of the `validator` in the doppelganger protection process. + pub fn validator_status(&self, validator: PublicKeyBytes) -> DoppelgangerStatus { + self.doppelganger_states + .read() + .get(&validator) + .map(|v| { + if v.requires_further_checks() { + DoppelgangerStatus::SigningDisabled(validator) + } else { + DoppelgangerStatus::SigningEnabled(validator) + } + }) + .unwrap_or_else(|| { + crit!( + self.log, + "Validator unknown to doppelganger service"; + "msg" => "preventing validator from performing duties", + "pubkey" => ?validator + ); + DoppelgangerStatus::UnknownToDoppelganger(validator) + }) + } + + /// Register a new validator with the doppelganger service. + /// + /// Validators added during the genesis epoch will not have doppelganger protection applied to + /// them. + pub fn register_new_validator( + &self, + validator: PublicKeyBytes, + slot_clock: &T, + ) -> Result<(), String> { + let current_epoch = slot_clock + .now() + .ok_or_else(|| "Unable to read slot clock when registering validator".to_string())? + .epoch(E::slots_per_epoch()); + let genesis_epoch = slot_clock.genesis_slot().epoch(E::slots_per_epoch()); + + let remaining_epochs = if current_epoch <= genesis_epoch { + // Disable doppelganger protection when the validator was initialized before genesis. + // + // Without this, all validators would simply miss the first + // `DEFAULT_REMAINING_DETECTION_EPOCHS` epochs and then all start at the same time. This + // would be pointless. + // + // The downside of this is that no validators have doppelganger protection at genesis. + // It's an unfortunate trade-off. + 0 + } else { + DEFAULT_REMAINING_DETECTION_EPOCHS + }; + + let state = DoppelgangerState { + next_check_epoch: current_epoch.saturating_add(1_u64), + remaining_epochs, + }; + + self.doppelganger_states.write().insert(validator, state); + + Ok(()) + } + + /// Contact the beacon node and try to detect if there are any doppelgangers, updating the state + /// of `self`. + /// + /// ## Notes + /// + /// This function is relatively complex when it comes to generic parameters. This is to allow + /// for simple unit testing. Using these generics, we can test the `DoppelgangerService` without + /// needing a BN API or a `ValidatorStore`. + async fn detect_doppelgangers( + &self, + request_slot: Slot, + get_index: &I, + get_liveness: &L, + shutdown_func: &mut S, + ) -> Result<(), String> + where + E: EthSpec, + I: Fn(PublicKeyBytes) -> Option, + L: Fn(Epoch, Vec) -> F, + F: Future, + S: FnMut(), + { + // Get all validators with active doppelganger protection. + let indices_map = self.compute_detection_indices_map(get_index); + + if indices_map.is_empty() { + // Nothing to do. + return Ok(()); + } + + // Get a list of indices to provide to the BN API. + let indices_only = indices_map.iter().map(|(index, _)| *index).collect(); + + // Pull the liveness responses from the BN. + let request_epoch = request_slot.epoch(E::slots_per_epoch()); + let liveness_responses = get_liveness(request_epoch, indices_only).await; + + // Process the responses, attempting to detect doppelgangers. 
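+        // This final step is synchronous and generic over `get_index`/`get_liveness`, which is
+        // what lets the unit tests below exercise it without a beacon node or `ValidatorStore`.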
+ self.process_liveness_responses::( + request_slot, + liveness_responses, + &indices_map, + shutdown_func, + ) + } + + /// Get a map of `validator_index` -> `validator_pubkey` for all validators still requiring + /// further doppelganger checks. + /// + /// Any validator with an unknown index will be omitted from these results. + fn compute_detection_indices_map(&self, get_index: &F) -> HashMap + where + F: Fn(PublicKeyBytes) -> Option, + { + let detection_pubkeys = self + .doppelganger_states + .read() + .iter() + .filter_map(|(pubkey, state)| { + if state.requires_further_checks() { + Some(*pubkey) + } else { + None + } + }) + .collect::>(); + + // Maps validator indices to pubkeys. + let mut indices_map = HashMap::with_capacity(detection_pubkeys.len()); + + // It is important to ensure that the `self.doppelganger_states` lock is not interleaved with + // any other locks. That is why this is a separate loop to the one that generates + // `detection_pubkeys`. + for pubkey in detection_pubkeys { + if let Some(index) = get_index(pubkey) { + indices_map.insert(index, pubkey); + } + } + + indices_map + } + + /// Process the liveness responses from the BN, potentially updating doppelganger states or + /// shutting down the VC. + fn process_liveness_responses( + &self, + request_slot: Slot, + liveness_responses: LivenessResponses, + indices_map: &HashMap, + shutdown_func: &mut S, + ) -> Result<(), String> + where + S: FnMut(), + { + let request_epoch = request_slot.epoch(E::slots_per_epoch()); + let previous_epoch = request_epoch.saturating_sub(1_u64); + let LivenessResponses { + previous_epoch_responses, + current_epoch_responses, + } = liveness_responses; + + // Perform a loop through the current and previous epoch responses and detect any violators. + // + // A following loop will update the states of each validator, depending on whether or not + // any violators were detected here. + let mut violators = vec![]; + for response in previous_epoch_responses + .iter() + .chain(current_epoch_responses.iter()) + { + if !response.is_live { + continue; + } + + // Resolve the index from the server response back to a public key. + let pubkey = if let Some(pubkey) = indices_map.get(&response.index) { + pubkey + } else { + crit!( + self.log, + "Inconsistent indices map"; + "validator_index" => response.index, + ); + // Skip this result if an inconsistency is detected. + continue; + }; + + let next_check_epoch = if let Some(state) = self.doppelganger_states.read().get(pubkey) + { + state.next_check_epoch + } else { + crit!( + self.log, + "Inconsistent doppelganger state"; + "validator_pubkey" => ?pubkey, + ); + // Skip this result if an inconsistency is detected. + continue; + }; + + if response.is_live && next_check_epoch >= response.epoch { + violators.push(response.index); + } + } + + let violators_exist = !violators.is_empty(); + if violators_exist { + crit!( + self.log, + "Doppelganger(s) detected"; + "msg" => "A doppelganger occurs when two different validator clients run the \ + same public key. This validator client detected another instance of a local \ + validator on the network and is shutting down to prevent potential slashable \ + offences. Ensure that you are not running a duplicate or overlapping \ + validator client", + "doppelganger_indices" => ?violators + ) + } + + // The concept of "epoch satisfaction" is that for some epoch `e` we are *satisfied* that + // we've waited long enough such that we don't expect to see any more consensus messages + // for that epoch. 
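+        //
+        // For example (assuming 32 slots per epoch): blocks up to the end of epoch `10` may still
+        // include attestations from epoch `9`, so epoch `9` is only treated as satisfied around
+        // slot `351`, the last slot of epoch `10`.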
+        //
+        // As it stands now, we consider epoch `e` to be satisfied once we're in the last slot of
+        // epoch `e + 1`.
+        //
+        // The reasoning for this choice of satisfaction slot is that by this point we've
+        // *probably* seen all the blocks that are permitted to contain attestations from epoch `e`.
+        let previous_epoch_satisfaction_slot = previous_epoch
+            .saturating_add(1_u64)
+            .end_slot(E::slots_per_epoch());
+        let previous_epoch_is_satisfied = request_slot >= previous_epoch_satisfaction_slot;
+
+        // Iterate through all the previous epoch responses, updating `self.doppelganger_states`.
+        //
+        // Do not bother iterating through the current epoch responses since they've already been
+        // checked for violators and they don't result in updating the state.
+        for response in &previous_epoch_responses {
+            // Sanity check response from the server.
+            //
+            // Abort the entire routine if the server starts returning junk.
+            if response.epoch != previous_epoch {
+                return Err(format!(
+                    "beacon node returned epoch {}, expecting {}",
+                    response.epoch, previous_epoch
+                ));
+            }
+
+            // Resolve the index from the server response back to a public key.
+            let pubkey = indices_map
+                .get(&response.index)
+                // Abort the routine if inconsistency is detected.
+                .ok_or_else(|| {
+                    format!(
+                        "inconsistent indices map for validator index {}",
+                        response.index
+                    )
+                })?;
+
+            // Hold the lock on `self` for the rest of this loop iteration.
+            //
+            // !! IMPORTANT !!
+            //
+            // There is a write-lock being held; avoid interacting with other locks until it is
+            // dropped.
+            let mut doppelganger_states = self.doppelganger_states.write();
+            let doppelganger_state = doppelganger_states
+                .get_mut(pubkey)
+                // Abort the routine if inconsistency is detected.
+                .ok_or_else(|| format!("inconsistent states for validator pubkey {}", pubkey))?;
+
+            // If a single doppelganger is detected, enable doppelganger checks on all
+            // validators forever (technically only 2**64 epochs).
+            //
+            // This has the effect of stopping validator activity even if the validator client
+            // fails to shut down.
+            //
+            // A weird side-effect is that the BN will keep getting liveness queries that will be
+            // ignored by the VC. Since the VC *should* shut down anyway, this seems fine.
+            if violators_exist {
+                doppelganger_state.remaining_epochs = u64::MAX;
+                continue;
+            }
+
+            let is_newly_satisfied_epoch = previous_epoch_is_satisfied
+                && previous_epoch >= doppelganger_state.next_check_epoch;
+
+            if !response.is_live && is_newly_satisfied_epoch {
+                // Update the `doppelganger_state` to consider the previous epoch's checks complete.
+                doppelganger_state.complete_detection_in_epoch(previous_epoch);
+
+                info!(
+                    self.log,
+                    "Found no doppelganger";
+                    "further_checks_remaining" => doppelganger_state.remaining_epochs,
+                    "epoch" => response.epoch,
+                    "validator_index" => response.index
+                );
+
+                if doppelganger_state.remaining_epochs == 0 {
+                    info!(
+                        self.log,
+                        "Doppelganger detection complete";
+                        "msg" => "starting validator",
+                        "validator_index" => response.index
+                    );
+                }
+            }
+        }
+
+        // Attempt to shut down the validator client if there are any detected duplicate validators.
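+        //
+        // In production, `shutdown_func` sends `ShutdownReason::Failure("Doppelganger detected.")`
+        // down the task executor's shutdown channel (see `start_update_service`); the tests below
+        // substitute a closure that merely records that a shutdown was requested.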
+ if violators_exist { + shutdown_func(); + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use environment::null_logger; + use futures::executor::block_on; + use slot_clock::TestingSlotClock; + use std::collections::HashSet; + use std::future; + use std::time::Duration; + use types::{ + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + MainnetEthSpec, + }; + + const DEFAULT_VALIDATORS: usize = 8; + + type E = MainnetEthSpec; + + fn genesis_epoch() -> Epoch { + E::default_spec().genesis_slot.epoch(E::slots_per_epoch()) + } + + fn check_detection_indices(detection_indices: &[u64]) { + assert_eq!( + detection_indices.iter().copied().collect::>(), + (0..DEFAULT_VALIDATORS as u64).collect::>(), + "all validators should be included in detection indices" + ); + } + + struct TestBuilder { + validator_count: usize, + } + + impl Default for TestBuilder { + fn default() -> Self { + Self { + validator_count: DEFAULT_VALIDATORS, + } + } + } + + impl TestBuilder { + fn build(self) -> TestScenario { + let mut rng = XorShiftRng::from_seed([42; 16]); + let slot_clock = + TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let log = null_logger().unwrap(); + + TestScenario { + validators: (0..self.validator_count) + .map(|_| PublicKeyBytes::random_for_test(&mut rng)) + .collect(), + doppelganger: DoppelgangerService::new(log), + slot_clock, + } + } + } + + struct TestScenario { + validators: Vec, + doppelganger: DoppelgangerService, + slot_clock: TestingSlotClock, + } + + impl TestScenario { + pub fn pubkey_to_index_map(&self) -> HashMap { + self.validators + .iter() + .enumerate() + .map(|(index, pubkey)| (*pubkey, index as u64)) + .collect() + } + + pub fn set_slot(self, slot: Slot) -> Self { + self.slot_clock.set_slot(slot.into()); + self + } + + pub fn register_all_in_doppelganger_protection_if_enabled(self) -> Self { + let mut this = self; + for i in 0..this.validators.len() { + this = this.register_validator(i as u64); + } + this + } + + pub fn register_validators(self, validators: &[u64]) -> Self { + let mut this = self; + for i in validators { + this = this.register_validator(*i); + } + this + } + + pub fn register_validator(self, index: u64) -> Self { + let pubkey = *self + .validators + .get(index as usize) + .expect("index should exist"); + + self.doppelganger + .register_new_validator::(pubkey, &self.slot_clock) + .unwrap(); + self.doppelganger + .doppelganger_states + .read() + .get(&pubkey) + .expect("validator should be registered"); + + self + } + + pub fn assert_all_enabled(self) -> Self { + /* + * 1. Ensure all validators have the correct status. + */ + for validator in &self.validators { + assert_eq!( + self.doppelganger.validator_status(*validator), + DoppelgangerStatus::SigningEnabled(*validator), + "all validators should be enabled" + ); + } + + /* + * 2. Ensure a correct detection indices map is generated. + */ + let pubkey_to_index = self.pubkey_to_index_map(); + let generated_map = self + .doppelganger + .compute_detection_indices_map(&|pubkey| pubkey_to_index.get(&pubkey).copied()); + assert!( + generated_map.is_empty(), + "there should be no indices for detection if all validators are enabled" + ); + + self + } + + pub fn assert_all_disabled(self) -> Self { + /* + * 1. Ensure all validators have the correct status. 
+ */ + for validator in &self.validators { + assert_eq!( + self.doppelganger.validator_status(*validator), + DoppelgangerStatus::SigningDisabled(*validator), + "all validators should be disabled" + ); + } + + /* + * 2. Ensure a correct detection indices map is generated. + */ + let pubkey_to_index = self.pubkey_to_index_map(); + let generated_map = self + .doppelganger + .compute_detection_indices_map(&|pubkey| pubkey_to_index.get(&pubkey).copied()); + + assert_eq!( + pubkey_to_index.len(), + generated_map.len(), + "should declare all indices for detection" + ); + for (pubkey, index) in pubkey_to_index { + assert_eq!( + generated_map.get(&index), + Some(&pubkey), + "map should be consistent" + ); + } + + self + } + + pub fn assert_all_states(self, state: &DoppelgangerState) -> Self { + let mut this = self; + for i in 0..this.validators.len() { + this = this.assert_state(i as u64, state); + } + this + } + + pub fn assert_state(self, index: u64, state: &DoppelgangerState) -> Self { + let pubkey = *self + .validators + .get(index as usize) + .expect("index should exist"); + + assert_eq!( + self.doppelganger + .doppelganger_states + .read() + .get(&pubkey) + .expect("validator should be present"), + state, + "validator should match provided state" + ); + + self + } + + pub fn assert_unregistered(self, index: u64) -> Self { + let pubkey = *self + .validators + .get(index as usize) + .expect("index should exist in test scenario"); + + assert!( + self.doppelganger + .doppelganger_states + .read() + .get(&pubkey) + .is_none(), + "validator should not be present in states" + ); + + assert_eq!( + self.doppelganger.validator_status(pubkey), + DoppelgangerStatus::UnknownToDoppelganger(pubkey), + "validator status should be unknown" + ); + + self + } + } + + #[test] + fn enabled_in_genesis_epoch() { + for slot in genesis_epoch().slot_iter(E::slots_per_epoch()) { + TestBuilder::default() + .build() + .set_slot(slot) + .register_all_in_doppelganger_protection_if_enabled() + .assert_all_enabled() + .assert_all_states(&DoppelgangerState { + next_check_epoch: genesis_epoch() + 1, + remaining_epochs: 0, + }); + } + } + + #[test] + fn disabled_after_genesis_epoch() { + let epoch = genesis_epoch() + 1; + + for slot in epoch.slot_iter(E::slots_per_epoch()) { + TestBuilder::default() + .build() + .set_slot(slot) + .register_all_in_doppelganger_protection_if_enabled() + .assert_all_disabled() + .assert_all_states(&DoppelgangerState { + next_check_epoch: epoch + 1, + remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS, + }); + } + } + + #[test] + fn unregistered_validator() { + // Non-genesis epoch + let epoch = genesis_epoch() + 2; + + TestBuilder::default() + .build() + .set_slot(epoch.start_slot(E::slots_per_epoch())) + // Register only validator 1. + .register_validator(1) + // Ensure validator 1 was registered. + .assert_state( + 1, + &DoppelgangerState { + next_check_epoch: epoch + 1, + remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS, + }, + ) + // Ensure validator 2 was not registered. 
+ .assert_unregistered(2); + } + + enum ShouldShutdown { + Yes, + No, + } + + fn get_false_responses(current_epoch: Epoch, detection_indices: &[u64]) -> LivenessResponses { + LivenessResponses { + current_epoch_responses: detection_indices + .iter() + .map(|i| LivenessResponseData { + index: *i as u64, + epoch: current_epoch, + is_live: false, + }) + .collect(), + previous_epoch_responses: detection_indices + .iter() + .map(|i| LivenessResponseData { + index: *i as u64, + epoch: current_epoch - 1, + is_live: false, + }) + .collect(), + } + } + + impl TestScenario { + pub fn simulate_detect_doppelgangers( + self, + slot: Slot, + should_shutdown: ShouldShutdown, + get_liveness: L, + ) -> Self + where + L: Fn(Epoch, Vec) -> F, + F: Future, + { + // Create a simulated shutdown sender. + let mut did_shutdown = false; + let mut shutdown_func = || did_shutdown = true; + + // Create a simulated validator store that can resolve pubkeys to indices. + let pubkey_to_index = self.pubkey_to_index_map(); + let get_index = |pubkey| pubkey_to_index.get(&pubkey).copied(); + + block_on(self.doppelganger.detect_doppelgangers::( + slot, + &get_index, + &get_liveness, + &mut shutdown_func, + )) + .expect("detection should not error"); + + match should_shutdown { + ShouldShutdown::Yes if !did_shutdown => panic!("vc failed to shutdown"), + ShouldShutdown::No if did_shutdown => panic!("vc shutdown when it shouldn't"), + _ => (), + } + + self + } + } + + #[test] + fn detect_at_genesis() { + let epoch = genesis_epoch(); + let slot = epoch.start_slot(E::slots_per_epoch()); + + TestBuilder::default() + .build() + .set_slot(slot) + .register_all_in_doppelganger_protection_if_enabled() + // All validators should have signing enabled since it's the genesis epoch. + .assert_all_enabled() + .simulate_detect_doppelgangers( + slot, + ShouldShutdown::No, + |_, _| { + panic!("the beacon node should not get a request if there are no doppelganger validators"); + + // The compiler needs this, otherwise it complains that this isn't a future. + #[allow(unreachable_code)] + future::ready(get_false_responses(Epoch::new(0), &[])) + }, + ) + // All validators should be enabled. + .assert_all_enabled(); + } + + fn detect_after_genesis_test(mutate_responses: F) + where + F: Fn(&mut LivenessResponses), + { + let epoch = genesis_epoch() + 1; + let slot = epoch.start_slot(E::slots_per_epoch()); + + TestBuilder::default() + .build() + .set_slot(slot) + .register_all_in_doppelganger_protection_if_enabled() + .assert_all_disabled() + // First, simulate a check where there are no doppelgangers. + .simulate_detect_doppelgangers( + slot, + ShouldShutdown::No, + |current_epoch, detection_indices: Vec<_>| { + assert_eq!(current_epoch, epoch); + check_detection_indices(&detection_indices); + + let liveness_responses = get_false_responses(current_epoch, &detection_indices); + + future::ready(liveness_responses) + }, + ) + // All validators should be disabled since they started after genesis. + .assert_all_disabled() + // Now, simulate a check where we apply `mutate_responses` which *must* create some + // doppelgangers. + .simulate_detect_doppelgangers( + // Perform this check in the next slot. 
+ slot + 1, + ShouldShutdown::Yes, + |current_epoch, detection_indices: Vec<_>| { + assert_eq!(current_epoch, epoch); + check_detection_indices(&detection_indices); + + let mut liveness_responses = + get_false_responses(current_epoch, &detection_indices); + + mutate_responses(&mut liveness_responses); + + future::ready(liveness_responses) + }, + ) + // All validators should still be disabled. + .assert_all_disabled() + // The states of all validators should be jammed with `u64::max_value()`. + .assert_all_states(&DoppelgangerState { + next_check_epoch: epoch + 1, + remaining_epochs: u64::MAX, + }); + } + + #[test] + fn detect_after_genesis_with_current_epoch_doppelganger() { + detect_after_genesis_test(|liveness_responses| { + liveness_responses.current_epoch_responses[0].is_live = true + }) + } + + #[test] + fn detect_after_genesis_with_previous_epoch_doppelganger() { + detect_after_genesis_test(|liveness_responses| { + liveness_responses.previous_epoch_responses[0].is_live = true + }) + } + + #[test] + fn no_doppelgangers_for_adequate_time() { + let initial_epoch = genesis_epoch() + 42; + let initial_slot = initial_epoch.start_slot(E::slots_per_epoch()); + let activation_slot = + (initial_epoch + DEFAULT_REMAINING_DETECTION_EPOCHS + 1).end_slot(E::slots_per_epoch()); + + let mut scenario = TestBuilder::default() + .build() + .set_slot(initial_slot) + .register_all_in_doppelganger_protection_if_enabled() + .assert_all_disabled(); + + for slot in initial_slot.as_u64()..=activation_slot.as_u64() { + let slot = Slot::new(slot); + let epoch = slot.epoch(E::slots_per_epoch()); + + scenario = scenario.simulate_detect_doppelgangers( + slot, + ShouldShutdown::No, + |current_epoch, detection_indices: Vec<_>| { + assert_eq!(current_epoch, epoch); + check_detection_indices(&detection_indices); + + let liveness_responses = get_false_responses(current_epoch, &detection_indices); + + future::ready(liveness_responses) + }, + ); + + let is_first_epoch = epoch == initial_epoch; + let is_second_epoch = epoch == initial_epoch + 1; + let is_satisfaction_slot = slot == epoch.end_slot(E::slots_per_epoch()); + let epochs_since_start = epoch.as_u64().checked_sub(initial_epoch.as_u64()).unwrap(); + + let expected_state = if is_first_epoch || is_second_epoch { + DoppelgangerState { + next_check_epoch: initial_epoch + 1, + remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS, + } + } else if !is_satisfaction_slot { + DoppelgangerState { + next_check_epoch: epoch - 1, + remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS + .saturating_sub(epochs_since_start.saturating_sub(2)), + } + } else { + DoppelgangerState { + next_check_epoch: epoch, + remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS + .saturating_sub(epochs_since_start.saturating_sub(1)), + } + }; + + scenario = scenario.assert_all_states(&expected_state); + + scenario = if slot < activation_slot { + scenario.assert_all_disabled() + } else { + scenario.assert_all_enabled() + }; + } + + scenario + .assert_all_enabled() + .assert_all_states(&DoppelgangerState { + next_check_epoch: activation_slot.epoch(E::slots_per_epoch()), + remaining_epochs: 0, + }); + } + + #[test] + fn time_skips_forward() { + let initial_epoch = genesis_epoch() + 1; + let initial_slot = initial_epoch.start_slot(E::slots_per_epoch()); + let skipped_forward_epoch = initial_epoch + 42; + let skipped_forward_slot = skipped_forward_epoch.end_slot(E::slots_per_epoch()); + + TestBuilder::default() + .build() + .set_slot(initial_slot) + .register_all_in_doppelganger_protection_if_enabled() + 
.assert_all_disabled() + // First, simulate a check in the initialization epoch. + .simulate_detect_doppelgangers( + initial_slot, + ShouldShutdown::No, + |current_epoch, detection_indices: Vec<_>| { + assert_eq!(current_epoch, initial_epoch); + check_detection_indices(&detection_indices); + + future::ready(get_false_responses(current_epoch, &detection_indices)) + }, + ) + .assert_all_disabled() + .assert_all_states(&DoppelgangerState { + next_check_epoch: initial_epoch + 1, + remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS, + }) + // Simulate a check in the skipped forward slot + .simulate_detect_doppelgangers( + skipped_forward_slot, + ShouldShutdown::No, + |current_epoch, detection_indices: Vec<_>| { + assert_eq!(current_epoch, skipped_forward_epoch); + check_detection_indices(&detection_indices); + + future::ready(get_false_responses(current_epoch, &detection_indices)) + }, + ) + .assert_all_states(&DoppelgangerState { + next_check_epoch: skipped_forward_epoch, + remaining_epochs: 0, + }); + } + + #[test] + fn time_skips_backward() { + let initial_epoch = genesis_epoch() + 42; + let initial_slot = initial_epoch.start_slot(E::slots_per_epoch()); + let skipped_backward_epoch = initial_epoch - 12; + let skipped_backward_slot = skipped_backward_epoch.end_slot(E::slots_per_epoch()); + + TestBuilder::default() + .build() + .set_slot(initial_slot) + .register_all_in_doppelganger_protection_if_enabled() + .assert_all_disabled() + // First, simulate a check in the initialization epoch. + .simulate_detect_doppelgangers( + initial_slot, + ShouldShutdown::No, + |current_epoch, detection_indices: Vec<_>| { + assert_eq!(current_epoch, initial_epoch); + check_detection_indices(&detection_indices); + + future::ready(get_false_responses(current_epoch, &detection_indices)) + }, + ) + .assert_all_disabled() + .assert_all_states(&DoppelgangerState { + next_check_epoch: initial_epoch + 1, + remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS, + }) + // Simulate a check in the skipped backward slot + .simulate_detect_doppelgangers( + skipped_backward_slot, + ShouldShutdown::No, + |current_epoch, detection_indices: Vec<_>| { + assert_eq!(current_epoch, skipped_backward_epoch); + check_detection_indices(&detection_indices); + + future::ready(get_false_responses(current_epoch, &detection_indices)) + }, + ) + .assert_all_disabled() + // When time skips backward we should *not* allow doppelganger advancement. 
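+            // That is, the state must be identical to the post-registration state asserted above.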
+ .assert_all_states(&DoppelgangerState { + next_check_epoch: initial_epoch + 1, + remaining_epochs: DEFAULT_REMAINING_DETECTION_EPOCHS, + }); + } + + #[test] + fn staggered_entry() { + let early_epoch = genesis_epoch() + 42; + let early_slot = early_epoch.start_slot(E::slots_per_epoch()); + let early_activation_slot = + (early_epoch + DEFAULT_REMAINING_DETECTION_EPOCHS + 1).end_slot(E::slots_per_epoch()); + + let late_epoch = early_epoch + 1; + let late_slot = late_epoch.start_slot(E::slots_per_epoch()); + let late_activation_slot = + (late_epoch + DEFAULT_REMAINING_DETECTION_EPOCHS + 1).end_slot(E::slots_per_epoch()); + + let early_validators: Vec = (0..DEFAULT_VALIDATORS as u64 / 2).collect(); + let late_validators: Vec = + (DEFAULT_VALIDATORS as u64 / 2..DEFAULT_VALIDATORS as u64).collect(); + + let mut scenario = TestBuilder::default() + .build() + .set_slot(early_slot) + .register_validators(&early_validators) + .set_slot(late_slot) + .register_validators(&late_validators) + .assert_all_disabled(); + + for slot in early_slot.as_u64()..=late_activation_slot.as_u64() { + let slot = Slot::new(slot); + + scenario = scenario.simulate_detect_doppelgangers( + slot, + ShouldShutdown::No, + |current_epoch, detection_indices: Vec<_>| { + future::ready(get_false_responses(current_epoch, &detection_indices)) + }, + ); + + for index in 0..DEFAULT_VALIDATORS as u64 { + let pubkey = *scenario.validators.get(index as usize).unwrap(); + + let should_be_disabled = if early_validators.contains(&index) { + slot < early_activation_slot + } else if late_validators.contains(&index) { + slot < late_activation_slot + } else { + unreachable!("inconsistent test"); + }; + + if should_be_disabled { + assert_eq!( + scenario.doppelganger.validator_status(pubkey), + DoppelgangerStatus::SigningDisabled(pubkey) + ) + } else { + assert_eq!( + scenario.doppelganger.validator_status(pubkey), + DoppelgangerStatus::SigningEnabled(pubkey) + ) + } + } + } + + scenario.assert_all_enabled(); + } +} diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index aa7edd29e5a..1a667d1cb29 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -8,7 +8,9 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; use crate::{ - block_service::BlockServiceNotification, http_metrics::metrics, validator_store::ValidatorStore, + block_service::BlockServiceNotification, + http_metrics::metrics, + validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}, }; use environment::RuntimeContext; use eth2::types::{AttesterData, BeaconCommitteeSubscription, ProposerData, StateId, ValidatorId}; @@ -36,7 +38,7 @@ const HISTORICAL_DUTIES_EPOCHS: u64 = 2; pub enum Error { UnableToReadSlotClock, FailedToDownloadAttesters(String), - FailedToProduceSelectionProof, + FailedToProduceSelectionProof(ValidatorStoreError), InvalidModulo(ArithError), } @@ -56,8 +58,8 @@ impl DutyAndProof { spec: &ChainSpec, ) -> Result { let selection_proof = validator_store - .produce_selection_proof(&duty.pubkey, duty.slot) - .ok_or(Error::FailedToProduceSelectionProof)?; + .produce_selection_proof(duty.pubkey, duty.slot) + .map_err(Error::FailedToProduceSelectionProof)?; let selection_proof = selection_proof .is_aggregator(duty.committee_length as usize, spec) @@ -84,7 +86,6 @@ type DependentRoot = Hash256; type AttesterMap = HashMap>; type ProposerMap = HashMap)>; -type IndicesMap = HashMap; /// See the module-level documentation. 
pub struct DutiesService { @@ -93,11 +94,8 @@ pub struct DutiesService { /// Maps an epoch to all *local* proposers in this epoch. Notably, this does not contain /// proposals for any validators which are not registered locally. pub proposers: RwLock, - /// Maps a public key to a validator index. There is a task which ensures this map is kept - /// up-to-date. - pub indices: RwLock, /// Provides the canonical list of locally-managed validators. - pub validator_store: ValidatorStore, + pub validator_store: Arc>, /// Tracks the current slot. pub slot_clock: T, /// Provides HTTP access to remote beacon nodes. @@ -119,21 +117,44 @@ impl DutiesService { /// Returns the total number of validators that should propose in the given epoch. pub fn proposer_count(&self, epoch: Epoch) -> usize { + // Only collect validators that are considered safe in terms of doppelganger protection. + let signing_pubkeys: HashSet<_> = self + .validator_store + .voting_pubkeys(DoppelgangerStatus::only_safe); + self.proposers .read() .get(&epoch) - .map_or(0, |(_, proposers)| proposers.len()) + .map_or(0, |(_, proposers)| { + proposers + .iter() + .filter(|proposer_data| signing_pubkeys.contains(&proposer_data.pubkey)) + .count() + }) } /// Returns the total number of validators that should attest in the given epoch. pub fn attester_count(&self, epoch: Epoch) -> usize { + // Only collect validators that are considered safe in terms of doppelganger protection. + let signing_pubkeys: HashSet<_> = self + .validator_store + .voting_pubkeys(DoppelgangerStatus::only_safe); self.attesters .read() .iter() - .filter(|(_, map)| map.contains_key(&epoch)) + .filter_map(|(_, map)| map.get(&epoch)) + .map(|(_, duty_and_proof)| duty_and_proof) + .filter(|duty_and_proof| signing_pubkeys.contains(&duty_and_proof.duty.pubkey)) .count() } + /// Returns the total number of validators that are in a doppelganger detection period. + pub fn doppelganger_detecting_count(&self) -> usize { + self.validator_store + .voting_pubkeys::, _>(DoppelgangerStatus::only_unsafe) + .len() + } + /// Returns the pubkeys of the validators which are assigned to propose in the given slot. /// /// It is possible that multiple validators have an identical proposal slot, however that is @@ -141,13 +162,21 @@ impl DutiesService { pub fn block_proposers(&self, slot: Slot) -> HashSet { let epoch = slot.epoch(E::slots_per_epoch()); + // Only collect validators that are considered safe in terms of doppelganger protection. + let signing_pubkeys: HashSet<_> = self + .validator_store + .voting_pubkeys(DoppelgangerStatus::only_safe); + self.proposers .read() .get(&epoch) .map(|(_, proposers)| { proposers .iter() - .filter(|proposer_data| proposer_data.slot == slot) + .filter(|proposer_data| { + proposer_data.slot == slot + && signing_pubkeys.contains(&proposer_data.pubkey) + }) .map(|proposer_data| proposer_data.pubkey) .collect() }) @@ -158,12 +187,20 @@ impl DutiesService { pub fn attesters(&self, slot: Slot) -> Vec { let epoch = slot.epoch(E::slots_per_epoch()); + // Only collect validators that are considered safe in terms of doppelganger protection. 
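+        // Duties for validators still under doppelganger scrutiny are retained (so they are on
+        // hand once checks complete); they are only filtered out here, at read time.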
+ let signing_pubkeys: HashSet<_> = self + .validator_store + .voting_pubkeys(DoppelgangerStatus::only_safe); + self.attesters .read() .iter() .filter_map(|(_, map)| map.get(&epoch)) .map(|(_, duty_and_proof)| duty_and_proof) - .filter(|duty_and_proof| duty_and_proof.duty.slot == slot) + .filter(|duty_and_proof| { + duty_and_proof.duty.slot == slot + && signing_pubkeys.contains(&duty_and_proof.duty.pubkey) + }) .cloned() .collect() } @@ -276,9 +313,23 @@ async fn poll_validator_indices( metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_INDICES]); let log = duties_service.context.log(); - for pubkey in duties_service.validator_store.voting_pubkeys() { + + // Collect *all* pubkeys for resolving indices, even those undergoing doppelganger protection. + // + // Since doppelganger protection queries rely on validator indices it is important to ensure we + // collect those indices. + let all_pubkeys: Vec<_> = duties_service + .validator_store + .voting_pubkeys(DoppelgangerStatus::ignored); + + for pubkey in all_pubkeys { // This is on its own line to avoid some weirdness with locks and if statements. - let is_known = duties_service.indices.read().contains_key(&pubkey); + let is_known = duties_service + .validator_store + .initialized_validators() + .read() + .get_index(&pubkey) + .is_some(); if !is_known { // Query the remote BN to resolve a pubkey to a validator index. @@ -307,9 +358,10 @@ async fn poll_validator_indices( "validator_index" => response.data.index ); duties_service - .indices + .validator_store + .initialized_validators() .write() - .insert(pubkey, response.data.index); + .set_index(&pubkey, response.data.index); } // This is not necessarily an error, it just means the validator is not yet known to // the beacon chain. @@ -359,18 +411,22 @@ async fn poll_beacon_attesters( let current_epoch = current_slot.epoch(E::slots_per_epoch()); let next_epoch = current_epoch + 1; - let local_pubkeys: HashSet = duties_service + // Collect *all* pubkeys, even those undergoing doppelganger protection. + // + // We must know the duties for doppelganger validators so that we can subscribe to their subnets + // and get more information about other running instances. + let local_pubkeys: HashSet<_> = duties_service .validator_store - .voting_pubkeys() - .into_iter() - .collect(); + .voting_pubkeys(DoppelgangerStatus::ignored); let local_indices = { let mut local_indices = Vec::with_capacity(local_pubkeys.len()); - let indices_map = duties_service.indices.read(); + + let vals_ref = duties_service.validator_store.initialized_validators(); + let vals = vals_ref.read(); for &pubkey in &local_pubkeys { - if let Some(validator_index) = indices_map.get(&pubkey) { - local_indices.push(*validator_index) + if let Some(validator_index) = vals.get_index(&pubkey) { + local_indices.push(validator_index) } } local_indices @@ -378,7 +434,7 @@ async fn poll_beacon_attesters( // Download the duties and update the duties for the current epoch. if let Err(e) = poll_beacon_attesters_for_epoch( - &duties_service, + duties_service, current_epoch, &local_indices, &local_pubkeys, @@ -402,7 +458,7 @@ async fn poll_beacon_attesters( // Download the duties and update the duties for the next epoch. 
     if let Err(e) =
-        poll_beacon_attesters_for_epoch(&duties_service, next_epoch, &local_indices, &local_pubkeys)
+        poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys)
             .await
     {
         error!(
@@ -431,7 +487,7 @@
             .attesters
             .read()
             .iter()
-            .filter_map(|(_, map)| map.get(&epoch))
+            .filter_map(|(_, map)| map.get(epoch))
             // The BN logs a warning if we try and subscribe to current or near-by slots. Give it a
             // buffer.
             .filter(|(_, duty_and_proof)| {
@@ -636,15 +692,18 @@
         current_slot,
         &initial_block_proposers,
         block_service_tx,
-        &log,
+        &duties_service.validator_store,
+        log,
     )
     .await;
 
-    let local_pubkeys: HashSet<PublicKeyBytes> = duties_service
+    // Collect *all* pubkeys, even those undergoing doppelganger protection.
+    //
+    // It is useful to keep the duties for all validators around, so they're on hand when
+    // doppelganger finishes.
+    let local_pubkeys: HashSet<_> = duties_service
         .validator_store
-        .voting_pubkeys()
-        .into_iter()
-        .collect();
+        .voting_pubkeys(DoppelgangerStatus::ignored);
 
     // Only download duties and push out additional block production events if we have some
     // validators.
@@ -723,7 +782,8 @@
             current_slot,
             &additional_block_producers,
             block_service_tx,
-            &log,
+            &duties_service.validator_store,
+            log,
         )
         .await;
         debug!(
@@ -745,24 +805,33 @@
 }
 
 /// Notify the block service if it should produce a block.
-async fn notify_block_production_service(
+async fn notify_block_production_service<T: SlotClock + 'static, E: EthSpec>(
     current_slot: Slot,
     block_proposers: &HashSet<PublicKeyBytes>,
     block_service_tx: &mut Sender<BlockServiceNotification>,
+    validator_store: &ValidatorStore<T, E>,
     log: &Logger,
 ) {
-    if let Err(e) = block_service_tx
-        .send(BlockServiceNotification {
-            slot: current_slot,
-            block_proposers: block_proposers.iter().copied().collect(),
-        })
-        .await
-    {
-        error!(
-            log,
-            "Failed to notify block service";
-            "current_slot" => current_slot,
-            "error" => %e
-        );
-    };
+    let non_doppelganger_proposers = block_proposers
+        .iter()
+        .filter(|pubkey| validator_store.doppelganger_protection_allows_signing(**pubkey))
+        .copied()
+        .collect::<Vec<_>>();
+
+    if !non_doppelganger_proposers.is_empty() {
+        if let Err(e) = block_service_tx
+            .send(BlockServiceNotification {
+                slot: current_slot,
+                block_proposers: non_doppelganger_proposers,
+            })
+            .await
+        {
+            error!(
+                log,
+                "Failed to notify block service";
+                "current_slot" => current_slot,
+                "error" => %e
+            );
+        };
+    }
 }
diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs
index 2c2df187b23..f5d39e397c9 100644
--- a/validator_client/src/fork_service.rs
+++ b/validator_client/src/fork_service.rs
@@ -137,6 +137,11 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
         *self.fork.read()
     }
 
+    /// Returns the slot clock.
+    pub fn slot_clock(&self) -> T {
+        self.slot_clock.clone()
+    }
+
     /// Starts the service that periodically polls for the `Fork`.
     pub fn start_update_service(self, context: &RuntimeContext<E>) -> Result<(), String> {
         // Run an immediate update before starting the updater service.
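The duties-service changes above repeatedly call `voting_pubkeys` with a filter function such as `DoppelgangerStatus::only_safe` or `DoppelgangerStatus::ignored`. Below is a minimal, self-contained Rust sketch of that filter-function pattern, assuming a simplified two-variant status enum and a `Pubkey` alias standing in for `PublicKeyBytes`; it is illustrative only, not the exact `DoppelgangerStatus` type introduced by this PR.

    // Simplified model of the doppelganger status filters used by `voting_pubkeys`.
    // `Pubkey` is a stand-in for `PublicKeyBytes`; the enum shape is an assumption.
    type Pubkey = [u8; 48];

    #[derive(Copy, Clone)]
    enum DoppelgangerStatus {
        // Detection has completed; the key may sign.
        SigningEnabled(Pubkey),
        // Still inside the detection period; the key must not sign.
        SigningDisabled(Pubkey),
    }

    impl DoppelgangerStatus {
        // Yield only keys that are safe to sign with.
        fn only_safe(self) -> Option<Pubkey> {
            match self {
                DoppelgangerStatus::SigningEnabled(pk) => Some(pk),
                DoppelgangerStatus::SigningDisabled(_) => None,
            }
        }

        // Yield every key regardless of status, e.g. for downloading duties.
        fn ignored(self) -> Option<Pubkey> {
            match self {
                DoppelgangerStatus::SigningEnabled(pk)
                | DoppelgangerStatus::SigningDisabled(pk) => Some(pk),
            }
        }
    }

    fn main() {
        let statuses = [
            DoppelgangerStatus::SigningEnabled([1; 48]),
            DoppelgangerStatus::SigningDisabled([2; 48]),
        ];

        // Mirrors `voting_pubkeys(DoppelgangerStatus::only_safe)`: one key survives.
        let safe: Vec<Pubkey> = statuses
            .iter()
            .copied()
            .filter_map(DoppelgangerStatus::only_safe)
            .collect();

        // Mirrors `voting_pubkeys(DoppelgangerStatus::ignored)`: both keys survive.
        let all: Vec<Pubkey> = statuses
            .iter()
            .copied()
            .filter_map(DoppelgangerStatus::ignored)
            .collect();

        assert_eq!(safe.len(), 1);
        assert_eq!(all.len(), 2);
    }

Passing the variant-shaped helper as a plain function keeps call sites such as `proposer_count` and `attesters` agnostic about why a key is excluded, which is what lets duties be fetched for every key while signing stays gated.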
diff --git a/validator_client/src/graffiti_file.rs b/validator_client/src/graffiti_file.rs
index 0df2cdb1074..5c1f84e10b3 100644
--- a/validator_client/src/graffiti_file.rs
+++ b/validator_client/src/graffiti_file.rs
@@ -9,6 +9,7 @@ use bls::PublicKeyBytes;
 use types::{graffiti::GraffitiString, Graffiti};
 
 #[derive(Debug)]
+#[allow(clippy::enum_variant_names)]
 pub enum Error {
     InvalidFile(std::io::Error),
     InvalidLine(String),
@@ -91,7 +92,7 @@ fn read_line(line: &str) -> Result<(Option<PublicKeyBytes>, Graffiti), Error> {
         if key == "default" {
             Ok((None, graffiti))
         } else {
-            let pk = PublicKeyBytes::from_str(&key).map_err(Error::InvalidPublicKey)?;
+            let pk = PublicKeyBytes::from_str(key).map_err(Error::InvalidPublicKey)?;
             Ok((Some(pk), graffiti))
         }
     } else {
diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs
index d3e5c2d1253..f75a048bed7 100644
--- a/validator_client/src/http_api/api_secret.rs
+++ b/validator_client/src/http_api/api_secret.rs
@@ -1,7 +1,7 @@
 use eth2::lighthouse_vc::{PK_LEN, SECRET_PREFIX as PK_PREFIX};
+use libsecp256k1::{Message, PublicKey, SecretKey};
 use rand::thread_rng;
 use ring::digest::{digest, SHA256};
-use secp256k1::{Message, PublicKey, SecretKey};
 use std::fs;
 use std::path::Path;
 use warp::Filter;
@@ -173,11 +173,11 @@ impl ApiSecret {
     /// Returns a closure which produces a signature over some bytes using the secret key in
     /// `self`. The signature is a 32-byte hash formatted as a 0x-prefixed string.
     pub fn signer(&self) -> impl Fn(&[u8]) -> String + Clone {
-        let sk = self.sk.clone();
+        let sk = self.sk;
         move |input: &[u8]| -> String {
             let message =
                 Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes");
-            let (signature, _) = secp256k1::sign(&message, &sk);
+            let (signature, _) = libsecp256k1::sign(&message, &sk);
             serde_utils::hex::encode(signature.serialize_der().as_ref())
         }
     }
diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs
index 5a6da2bc9c2..30625d9f4d5 100644
--- a/validator_client/src/http_api/create_validator.rs
+++ b/validator_client/src/http_api/create_validator.rs
@@ -97,7 +97,7 @@ pub async fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpe
     let validator_dir = ValidatorDirBuilder::new(validator_dir.as_ref().into())
         .voting_keystore(keystores.voting, voting_password.as_bytes())
         .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
-        .create_eth1_tx_data(request.deposit_gwei, &spec)
+        .create_eth1_tx_data(request.deposit_gwei, spec)
         .store_withdrawal_keystore(false)
         .build()
         .map_err(|e| {
diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs
index bc820ce44e5..87d8ae31354 100644
--- a/validator_client/src/http_api/mod.rs
+++ b/validator_client/src/http_api/mod.rs
@@ -50,10 +50,10 @@ impl From<String> for Error {
 /// A wrapper around all the items required to spawn the HTTP server.
 ///
 /// The server will gracefully handle the case where any fields are `None`.
-pub struct Context<T: Clone, E: EthSpec> {
+pub struct Context<T: SlotClock, E: EthSpec> {
     pub runtime: Weak<Runtime>,
     pub api_secret: ApiSecret,
-    pub validator_store: Option<ValidatorStore<T, E>>,
+    pub validator_store: Option<Arc<ValidatorStore<T, E>>>,
     pub validator_dir: Option<PathBuf>,
     pub spec: ChainSpec,
     pub config: Config,
@@ -203,7 +203,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
         .and(warp::path::end())
         .and(validator_store_filter.clone())
         .and(signer.clone())
-        .and_then(|validator_store: ValidatorStore<T, E>, signer| {
+        .and_then(|validator_store: Arc<ValidatorStore<T, E>>, signer| {
            blocking_signed_json_task(signer, move || {
                let validators = validator_store
                    .initialized_validators()
@@ -229,7 +229,7 @@
         .and(validator_store_filter.clone())
         .and(signer.clone())
         .and_then(
-            |validator_pubkey: PublicKey, validator_store: ValidatorStore<T, E>, signer| {
+            |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| {
                 blocking_signed_json_task(signer, move || {
                     let validator = validator_store
                         .initialized_validators()
@@ -267,7 +267,7 @@
             .and_then(
                 |body: Vec<api_types::ValidatorRequest>,
                  validator_dir: PathBuf,
-                 validator_store: ValidatorStore<T, E>,
+                 validator_store: Arc<ValidatorStore<T, E>>,
                  spec: Arc<ChainSpec>,
                  signer,
                  runtime: Weak<Runtime>| {
@@ -309,7 +309,7 @@
             .and_then(
                 |body: api_types::CreateValidatorsMnemonicRequest,
                  validator_dir: PathBuf,
-                 validator_store: ValidatorStore<T, E>,
+                 validator_store: Arc<ValidatorStore<T, E>>,
                  spec: Arc<ChainSpec>,
                  signer,
                  runtime: Weak<Runtime>| {
@@ -353,7 +353,7 @@
             .and_then(
                 |body: api_types::KeystoreValidatorsPostRequest,
                  validator_dir: PathBuf,
-                 validator_store: ValidatorStore<T, E>,
+                 validator_store: Arc<ValidatorStore<T, E>>,
                  signer,
                  runtime: Weak<Runtime>| {
                     blocking_signed_json_task(signer, move || {
@@ -428,7 +428,7 @@
             .and_then(
                 |validator_pubkey: PublicKey,
                  body: api_types::ValidatorPatchRequest,
-                 validator_store: ValidatorStore<T, E>,
+                 validator_store: Arc<ValidatorStore<T, E>>,
                  signer,
                  runtime: Weak<Runtime>| {
                     blocking_signed_json_task(signer, move || {
diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs
index cf2618bba11..fd8b2b9e730 100644
--- a/validator_client/src/http_api/tests.rs
+++ b/validator_client/src/http_api/tests.rs
@@ -1,6 +1,7 @@
 #![cfg(test)]
 #![cfg(not(debug_assertions))]
 
+use crate::doppelganger_service::DoppelgangerService;
 use crate::{
     http_api::{ApiSecret, Config as HttpConfig, Context},
     Config, ForkServiceBuilder, InitializedValidators, ValidatorDefinitions, ValidatorStore,
@@ -85,16 +86,21 @@ impl ApiTester {
             Hash256::repeat_byte(42),
             spec,
             fork_service.clone(),
+            Some(Arc::new(DoppelgangerService::new(log.clone()))),
             log.clone(),
         );
 
+        validator_store
+            .register_all_in_doppelganger_protection_if_enabled()
+            .expect("Should attach doppelganger service");
+
         let initialized_validators = validator_store.initialized_validators();
 
         let context: Arc<Context<SystemTimeSlotClock, E>> = Arc::new(Context {
             runtime,
             api_secret,
             validator_dir: Some(validator_dir.path().into()),
-            validator_store: Some(validator_store),
+            validator_store: Some(Arc::new(validator_store)),
             spec: E::default_spec(),
             config: HttpConfig {
                 enabled: true,
diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/src/http_metrics/mod.rs
index bb80e20f433..fcf98987adf 100644
--- a/validator_client/src/http_metrics/mod.rs
+++ b/validator_client/src/http_metrics/mod.rs
@@ -35,7 +35,7 @@ impl From<String> for Error {
 /// Contains objects which have shared access from inside/outside of the metrics server.
 pub struct Shared<T: EthSpec> {
-    pub validator_store: Option<ValidatorStore<SystemTimeSlotClock, T>>,
+    pub validator_store: Option<Arc<ValidatorStore<SystemTimeSlotClock, T>>>,
     pub duties_service: Option<Arc<DutiesService<SystemTimeSlotClock, T>>>,
     pub genesis_time: Option<u64>,
 }
diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs
index c471adcc8d8..4e9bbef76f9 100644
--- a/validator_client/src/initialized_validators.rs
+++ b/validator_client/src/initialized_validators.rs
@@ -33,7 +33,7 @@ const USE_STDIN: bool = false;
 pub enum Error {
     /// Refused to open a validator with an existing lockfile since that validator may be in-use by
     /// another process.
-    LockfileError(LockfileError),
+    Lockfile(LockfileError),
     /// The voting public key in the definition did not match the one in the keystore.
     VotingPublicKeyMismatch {
         definition: Box<PublicKey>,
@@ -62,11 +62,15 @@ pub enum Error {
     TokioJoin(tokio::task::JoinError),
     /// Cannot initialize the same validator twice.
     DuplicatePublicKey,
+    /// The public key does not exist in the set of initialized validators.
+    ValidatorNotInitialized(PublicKey),
+    /// Unable to read the slot clock.
+    SlotClock,
 }
 
 impl From<LockfileError> for Error {
     fn from(error: LockfileError) -> Self {
-        Self::LockfileError(error)
+        Self::Lockfile(error)
     }
 }
 
@@ -88,6 +92,8 @@ pub enum SigningMethod {
 pub struct InitializedValidator {
     signing_method: SigningMethod,
     graffiti: Option<Graffiti>,
+    /// The validator's index in `state.validators`, to be updated by an external service.
+    index: Option<u64>,
 }
 
 impl InitializedValidator {
@@ -212,6 +218,7 @@ impl InitializedValidator {
                 voting_keypair,
             },
             graffiti: def.graffiti.map(Into::into),
+            index: None,
         })
     }
 }
@@ -313,7 +320,7 @@ impl InitializedValidators {
         self.definitions.as_slice().len()
     }
 
-    /// Iterate through all **enabled** voting public keys in `self`.
+    /// Iterate through all voting public keys in `self` that should be used when querying for duties.
     pub fn iter_voting_pubkeys(&self) -> impl Iterator<Item = &PublicKeyBytes> {
         self.validators.iter().map(|(pubkey, _)| pubkey)
     }
@@ -456,7 +463,7 @@ impl InitializedValidators {
             read_password(path).map_err(Error::UnableToReadVotingKeystorePassword)?
         } else {
             let keystore = open_keystore(voting_keystore_path)?;
-            unlock_keystore_via_stdin_password(&keystore, &voting_keystore_path)?
+            unlock_keystore_via_stdin_password(&keystore, voting_keystore_path)?
                 .0
                 .as_ref()
                 .to_vec()
@@ -622,4 +629,14 @@ impl InitializedValidators {
         );
         Ok(())
     }
+
+    pub fn get_index(&self, pubkey: &PublicKeyBytes) -> Option<u64> {
+        self.validators.get(pubkey).and_then(|val| val.index)
+    }
+
+    pub fn set_index(&mut self, pubkey: &PublicKeyBytes, index: u64) {
+        if let Some(val) = self.validators.get_mut(pubkey) {
+            val.index = Some(index);
+        }
+    }
 }
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs
index d9fe21111be..be9a27db7b9 100644
--- a/validator_client/src/lib.rs
+++ b/validator_client/src/lib.rs
@@ -13,6 +13,7 @@ mod key_cache;
 mod notifier;
 mod validator_store;
 
+mod doppelganger_service;
 pub mod http_api;
 
 pub use cli::cli_app;
@@ -23,6 +24,7 @@ use monitoring_api::{MonitoringHttpClient, ProcessType};
 use crate::beacon_node_fallback::{
     start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, RequireSynced,
 };
+use crate::doppelganger_service::DoppelgangerService;
 use account_utils::validator_definitions::ValidatorDefinitions;
 use attestation_service::{AttestationService, AttestationServiceBuilder};
 use block_service::{BlockService, BlockServiceBuilder};
@@ -61,9 +63,12 @@ const WAITING_FOR_GENESIS_POLL_TIME: Duration = Duration::from_secs(12);
 /// This can help ensure that proper endpoint fallback occurs.
 const HTTP_ATTESTATION_TIMEOUT_QUOTIENT: u32 = 4;
 const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4;
+const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4;
 const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2;
 const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4;
 
+const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger";
+
 #[derive(Clone)]
 pub struct ProductionValidatorClient<T: EthSpec> {
     context: RuntimeContext<T>,
@@ -71,7 +76,8 @@ pub struct ProductionValidatorClient<T: EthSpec> {
     fork_service: ForkService<SystemTimeSlotClock, T>,
     block_service: BlockService<SystemTimeSlotClock, T>,
     attestation_service: AttestationService<SystemTimeSlotClock, T>,
-    validator_store: ValidatorStore<SystemTimeSlotClock, T>,
+    doppelganger_service: Option<Arc<DoppelgangerService>>,
+    validator_store: Arc<ValidatorStore<SystemTimeSlotClock, T>>,
     http_api_listen_addr: Option<SocketAddr>,
     http_metrics_ctx: Option<Arc<http_metrics::Context<T>>>,
     config: Config,
 }
 
@@ -84,7 +90,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
         context: RuntimeContext<T>,
         cli_args: &ArgMatches<'_>,
     ) -> Result<Self, String> {
-        let config = Config::from_cli(&cli_args, context.log())
+        let config = Config::from_cli(cli_args, context.log())
             .map_err(|e| format!("Unable to initialize config: {}", e))?;
         Self::new(context, config).await
     }
@@ -254,6 +260,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
             Timeouts {
                 attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT,
                 attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT,
+                liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT,
                 proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT,
                 proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT,
             }
@@ -313,14 +320,27 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
             .log(log.clone())
             .build()?;
 
-        let validator_store: ValidatorStore<SystemTimeSlotClock, T> = ValidatorStore::new(
-            validators,
-            slashing_protection,
-            genesis_validators_root,
-            context.eth2_config.spec.clone(),
-            fork_service.clone(),
-            log.clone(),
-        );
+        let doppelganger_service = if config.enable_doppelganger_protection {
+            Some(Arc::new(DoppelgangerService::new(
+                context
+                    .service_context(DOPPELGANGER_SERVICE_NAME.into())
+                    .log()
+                    .clone(),
+            )))
+        } else {
+            None
+        };
+
+        let validator_store: Arc<ValidatorStore<SystemTimeSlotClock, T>> =
+            Arc::new(ValidatorStore::new(
+                validators,
+                slashing_protection,
+                genesis_validators_root,
+                context.eth2_config.spec.clone(),
+                fork_service.clone(),
+                doppelganger_service.clone(),
+                log.clone(),
+            ));
 
         info!(
             log,
@@ -339,7 +359,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
         let duties_service = Arc::new(DutiesService {
             attesters: <_>::default(),
             proposers: <_>::default(),
-            indices: <_>::default(),
             slot_clock: slot_clock.clone(),
             beacon_nodes: beacon_nodes.clone(),
             validator_store: validator_store.clone(),
@@ -369,7 +388,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
         let attestation_service = AttestationServiceBuilder::new()
             .duties_service(duties_service.clone())
-            .slot_clock(slot_clock)
+            .slot_clock(slot_clock.clone())
             .validator_store(validator_store.clone())
             .beacon_nodes(beacon_nodes.clone())
             .runtime_context(context.service_context("attestation".into()))
@@ -381,12 +400,16 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
         // of making too many changes this close to genesis (<1 week).
         wait_for_genesis(&beacon_nodes, genesis_time, &context).await?;
 
+        // Ensure all validators are registered in doppelganger protection.
+        validator_store.register_all_in_doppelganger_protection_if_enabled()?;
+
         Ok(Self {
             context,
             duties_service,
             fork_service,
             block_service,
             attestation_service,
+            doppelganger_service,
             validator_store,
             config,
             http_api_listen_addr: None,
@@ -419,6 +442,20 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
             .start_update_service(&self.context.eth2_config.spec)
             .map_err(|e| format!("Unable to start attestation service: {}", e))?;
 
+        if let Some(doppelganger_service) = self.doppelganger_service.clone() {
+            DoppelgangerService::start_update_service(
+                doppelganger_service,
+                self.context
+                    .service_context(DOPPELGANGER_SERVICE_NAME.into()),
+                self.validator_store.clone(),
+                self.duties_service.beacon_nodes.clone(),
+                self.duties_service.slot_clock.clone(),
+            )
+            .map_err(|e| format!("Unable to start doppelganger service: {}", e))?
+        } else {
+            info!(log, "Doppelganger protection disabled.")
+        }
+
         spawn_notifier(self).map_err(|e| format!("Failed to start notifier: {}", e))?;
 
         let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?;
diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs
index 9b99c1a7e40..e72bd545da2 100644
--- a/validator_client/src/notifier.rs
+++ b/validator_client/src/notifier.rs
@@ -20,7 +20,7 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
     loop {
         if let Some(duration_to_next_slot) = duties_service.slot_clock.duration_to_next_slot() {
             sleep(duration_to_next_slot + slot_duration / 2).await;
-            notify(&duties_service, &log).await;
+            notify(&duties_service, log).await;
         } else {
             error!(log, "Failed to read slot clock");
             // If we can't read the slot clock, just wait another slot.
@@ -72,6 +72,11 @@ async fn notify<T: SlotClock + 'static, E: EthSpec>(
     let total_validators = duties_service.total_validator_count();
     let proposing_validators = duties_service.proposer_count(epoch);
     let attesting_validators = duties_service.attester_count(epoch);
+    let doppelganger_detecting_validators = duties_service.doppelganger_detecting_count();
+
+    if doppelganger_detecting_validators > 0 {
+        info!(log, "Searching for doppelgangers on the network"; "doppelganger_detecting_validators" => doppelganger_detecting_validators)
+    }
 
     if total_validators == 0 {
         info!(
diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs
index 96024990e67..54cef8b6783 100644
--- a/validator_client/src/validator_store.rs
+++ b/validator_client/src/validator_store.rs
@@ -1,21 +1,36 @@
 use crate::{
-    fork_service::ForkService, http_metrics::metrics, initialized_validators::InitializedValidators,
+    doppelganger_service::DoppelgangerService, fork_service::ForkService, http_metrics::metrics,
+    initialized_validators::InitializedValidators,
 };
 use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString};
 use parking_lot::{Mutex, RwLock};
 use slashing_protection::{NotSafe, Safe, SlashingDatabase};
 use slog::{crit, error, info, warn, Logger};
 use slot_clock::SlotClock;
+use std::iter::FromIterator;
 use std::path::Path;
 use std::sync::Arc;
-use tempfile::TempDir;
 use types::{
-    graffiti::GraffitiString, Attestation, BeaconBlock, ChainSpec, Domain, Epoch, EthSpec, Fork,
-    Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof,
-    SignedBeaconBlock, SignedRoot, Slot,
+    attestation::Error as AttestationError, graffiti::GraffitiString, Attestation, BeaconBlock,
+    ChainSpec, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes,
+    SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedRoot, Slot,
 };
 use validator_dir::ValidatorDir;
 
+pub use crate::doppelganger_service::DoppelgangerStatus;
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    DoppelgangerProtected(PublicKeyBytes),
+    UnknownToDoppelgangerService(PublicKeyBytes),
+    UnknownPubkey(PublicKeyBytes),
+    Slashable(NotSafe),
+    SameData,
+    GreaterThanCurrentSlot { slot: Slot, current_slot: Slot },
+    GreaterThanCurrentEpoch { epoch: Epoch, current_epoch: Epoch },
+    UnableToSignAttestation(AttestationError),
+}
+
 /// Number of epochs of slashing protection history to keep.
 ///
 /// This acts as a maximum safe-guard against clock drift.
@@ -46,7 +61,6 @@ impl PartialEq for LocalValidator {
     }
 }
 
-#[derive(Clone)]
 pub struct ValidatorStore<T, E: EthSpec> {
     validators: Arc<RwLock<InitializedValidators>>,
     slashing_protection: SlashingDatabase,
@@ -54,8 +68,9 @@ pub struct ValidatorStore<T, E: EthSpec> {
     genesis_validators_root: Hash256,
     spec: Arc<ChainSpec>,
     log: Logger,
-    temp_dir: Option<Arc<TempDir>>,
+    doppelganger_service: Option<Arc<DoppelgangerService>>,
     fork_service: ForkService<T, E>,
+    slot_clock: T,
 }
 
 impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
@@ -65,6 +80,7 @@
         genesis_validators_root: Hash256,
         spec: ChainSpec,
         fork_service: ForkService<T, E>,
+        doppelganger_service: Option<Arc<DoppelgangerService>>,
         log: Logger,
     ) -> Self {
         Self {
@@ -73,12 +89,32 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
             slashing_protection_last_prune: Arc::new(Mutex::new(Epoch::new(0))),
             genesis_validators_root,
             spec: Arc::new(spec),
-            log,
-            temp_dir: None,
+            log: log.clone(),
+            doppelganger_service,
+            slot_clock: fork_service.slot_clock(),
             fork_service,
         }
     }
 
+    /// Register all local validators in doppelganger protection to try and prevent instances of
+    /// duplicate validators operating on the network at the same time.
+    ///
+    /// This function has no effect if doppelganger protection is disabled.
+    pub fn register_all_in_doppelganger_protection_if_enabled(&self) -> Result<(), String> {
+        if let Some(doppelganger_service) = &self.doppelganger_service {
+            for pubkey in self.validators.read().iter_voting_pubkeys() {
+                doppelganger_service.register_new_validator::<E, _>(*pubkey, &self.slot_clock)?
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Returns `true` if doppelganger protection is enabled, or else `false`.
+    pub fn doppelganger_protection_enabled(&self) -> bool {
+        self.doppelganger_service.is_some()
+    }
+
     pub fn initialized_validators(&self) -> Arc<RwLock<InitializedValidators>> {
         self.validators.clone()
     }
@@ -105,12 +141,19 @@
         )
         .map_err(|e| format!("failed to create validator definitions: {:?}", e))?;
 
+        let validator_pubkey = validator_def.voting_public_key.compress();
+
         self.slashing_protection
-            .register_validator(validator_def.voting_public_key.compress())
+            .register_validator(validator_pubkey)
             .map_err(|e| format!("failed to register validator: {:?}", e))?;
 
         validator_def.enabled = enable;
 
+        if let Some(doppelganger_service) = &self.doppelganger_service {
+            doppelganger_service
+                .register_new_validator::<E, _>(validator_pubkey, &self.slot_clock)?;
+        }
+
         self.validators
             .write()
             .add_definition(validator_def.clone())
@@ -120,14 +163,92 @@
         Ok(validator_def)
     }
 
-    pub fn voting_pubkeys(&self) -> Vec<PublicKeyBytes> {
-        self.validators
+    /// Attempts to resolve the pubkey to a validator index.
+    ///
+    /// It may return `None` if the `pubkey` is:
+    ///
+    /// - Unknown.
+    /// - Known, but with an unknown index.
+    pub fn validator_index(&self, pubkey: &PublicKeyBytes) -> Option<u64> {
+        self.validators.read().get_index(pubkey)
+    }
+
+    /// Returns all voting pubkeys for all enabled validators.
+    ///
+    /// The `filter_func` allows for filtering pubkeys based upon their `DoppelgangerStatus`. There
+    /// are two primary functions used here:
+    ///
+    /// - `DoppelgangerStatus::only_safe`: only returns pubkeys which have passed doppelganger
+    ///   protection and are safe-enough to sign messages.
+    /// - `DoppelgangerStatus::ignored`: returns all the pubkeys from `only_safe` *plus* those still
+    ///   undergoing protection. This is useful for collecting duties or other non-signing tasks.
+    #[allow(clippy::needless_collect)] // Collect is required to avoid holding a lock.
+    pub fn voting_pubkeys<I, F>(&self, filter_func: F) -> I
+    where
+        I: FromIterator<PublicKeyBytes>,
+        F: Fn(DoppelgangerStatus) -> Option<PublicKeyBytes>,
+    {
+        // Collect all the pubkeys first to avoid interleaving locks on `self.validators` and
+        // `self.doppelganger_service()`.
+        let pubkeys = self
+            .validators
             .read()
             .iter_voting_pubkeys()
             .cloned()
+            .collect::<Vec<_>>();
+
+        pubkeys
+            .into_iter()
+            .map(|pubkey| {
+                self.doppelganger_service
+                    .as_ref()
+                    .map(|doppelganger_service| doppelganger_service.validator_status(pubkey))
+                    // Allow signing on all pubkeys if doppelganger protection is disabled.
+                    .unwrap_or_else(|| DoppelgangerStatus::SigningEnabled(pubkey))
+            })
+            .filter_map(filter_func)
             .collect()
     }
 
+    /// Returns doppelganger statuses for all enabled validators.
+    #[allow(clippy::needless_collect)] // Collect is required to avoid holding a lock.
+    pub fn doppelganger_statuses(&self) -> Vec<DoppelgangerStatus> {
+        // Collect all the pubkeys first to avoid interleaving locks on `self.validators` and
+        // `self.doppelganger_service`.
+        let pubkeys = self
+            .validators
+            .read()
+            .iter_voting_pubkeys()
+            .cloned()
+            .collect::<Vec<_>>();
+
+        pubkeys
+            .into_iter()
+            .map(|pubkey| {
+                self.doppelganger_service
+                    .as_ref()
+                    .map(|doppelganger_service| doppelganger_service.validator_status(pubkey))
+                    // Allow signing on all pubkeys if doppelganger protection is disabled.
+                    .unwrap_or_else(|| DoppelgangerStatus::SigningEnabled(pubkey))
+            })
+            .collect()
+    }
+
+    /// Check if the `validator_pubkey` is permitted by doppelganger protection to sign
+    /// messages.
+    pub fn doppelganger_protection_allows_signing(&self, validator_pubkey: PublicKeyBytes) -> bool {
+        self.doppelganger_service
+            .as_ref()
+            // If there's no doppelganger service then we assume it is purposefully disabled and
+            // declare that all keys are safe with regard to it.
+            .map_or(true, |doppelganger_service| {
+                doppelganger_service
+                    .validator_status(validator_pubkey)
+                    .only_safe()
+                    .is_some()
+            })
+    }
+
     pub fn num_voting_validators(&self) -> usize {
         self.validators.read().num_enabled()
     }
@@ -136,25 +257,56 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         self.fork_service.fork()
     }
 
+    /// Runs `func`, providing it access to the `Keypair` corresponding to `validator_pubkey`.
+    ///
+    /// This forms the canonical point for accessing the secret key of some validator. It is
+    /// structured as a `with_...` function since we need to pass-through a read-lock in order to
+    /// access the keypair.
+    ///
+    /// Access to keypairs might be restricted by other internal mechanisms (e.g., doppelganger
+    /// protection).
+    ///
+    /// ## Warning
+    ///
+    /// This function takes a read-lock on `self.validators`. To prevent deadlocks, it is advised to
+    /// never take any sort of concurrency lock inside this function.
+    fn with_validator_keypair<F, R>(
+        &self,
+        validator_pubkey: PublicKeyBytes,
+        func: F,
+    ) -> Result<R, Error>
+    where
+        F: FnOnce(&Keypair) -> R,
+    {
+        // If the doppelganger service is active, check to ensure it explicitly permits signing by
+        // this validator.
+        if !self.doppelganger_protection_allows_signing(validator_pubkey) {
+            return Err(Error::DoppelgangerProtected(validator_pubkey));
+        }
+
+        let validators_lock = self.validators.read();
+
+        Ok(func(
+            validators_lock
+                .voting_keypair(&validator_pubkey)
+                .ok_or(Error::UnknownPubkey(validator_pubkey))?,
+        ))
+    }
+
     pub fn randao_reveal(
         &self,
-        validator_pubkey: &PublicKeyBytes,
+        validator_pubkey: PublicKeyBytes,
         epoch: Epoch,
-    ) -> Option<Signature> {
-        self.validators
-            .read()
-            .voting_keypair(validator_pubkey)
-            .map(|voting_keypair| {
-                let domain = self.spec.get_domain(
-                    epoch,
-                    Domain::Randao,
-                    &self.fork(),
-                    self.genesis_validators_root,
-                );
-                let message = epoch.signing_root(domain);
+    ) -> Result<Signature, Error> {
+        let domain = self.spec.get_domain(
+            epoch,
+            Domain::Randao,
+            &self.fork(),
+            self.genesis_validators_root,
+        );
+        let message = epoch.signing_root(domain);
 
-                voting_keypair.sk.sign(message)
-            })
+        self.with_validator_keypair(validator_pubkey, |keypair| keypair.sk.sign(message))
     }
 
     pub fn graffiti(&self, validator_pubkey: &PublicKeyBytes) -> Option<Graffiti> {
         self.validators.read().graffiti(validator_pubkey)
     }
 
     pub fn sign_block(
         &self,
-        validator_pubkey: &PublicKeyBytes,
+        validator_pubkey: PublicKeyBytes,
         block: BeaconBlock<E>,
         current_slot: Slot,
-    ) -> Option<SignedBeaconBlock<E>> {
+    ) -> Result<SignedBeaconBlock<E>, Error> {
         // Make sure the block slot is not higher than the current slot to avoid potential attacks.
         if block.slot() > current_slot {
             warn!(
                 self.log,
@@ -175,7 +327,10 @@
                 "block_slot" => block.slot().as_u64(),
                 "current_slot" => current_slot.as_u64()
             );
-            return None;
+            return Err(Error::GreaterThanCurrentSlot {
+                slot: block.slot(),
+                current_slot,
+            });
         }
 
         // Check for slashing conditions.
@@ -188,25 +343,19 @@
         );
         let slashing_status = self.slashing_protection.check_and_insert_block_proposal(
-            validator_pubkey,
+            &validator_pubkey,
             &block.block_header(),
             domain,
         );
 
         match slashing_status {
-            // We can safely sign this block.
+            // We can safely sign this block without slashing.
             Ok(Safe::Valid) => {
-                let validators = self.validators.read();
-                let voting_keypair = validators.voting_keypair(validator_pubkey)?;
-
                 metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::SUCCESS]);
 
-                Some(block.sign(
-                    &voting_keypair.sk,
-                    &fork,
-                    self.genesis_validators_root,
-                    &self.spec,
-                ))
+                self.with_validator_keypair(validator_pubkey, move |keypair| {
+                    block.sign(&keypair.sk, &fork, self.genesis_validators_root, &self.spec)
+                })
             }
             Ok(Safe::SameData) => {
                 warn!(
@@ -214,7 +363,7 @@
                    self.log,
                    "Skipping signing of previously signed block";
                );
                metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::SAME_DATA]);
-                None
+                Err(Error::SameData)
            }
            Err(NotSafe::UnregisteredValidator(pk)) => {
                warn!(
@@ -224,7 +373,7 @@
                    "public_key" => format!("{:?}", pk)
                );
                metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::UNREGISTERED]);
-                None
+                Err(Error::Slashable(NotSafe::UnregisteredValidator(pk)))
            }
            Err(e) => {
                crit!(
@@ -233,21 +382,24 @@
                    "error" => format!("{:?}", e)
                );
                metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::SLASHABLE]);
-                None
+                Err(Error::Slashable(e))
            }
        }
    }

     pub fn sign_attestation(
         &self,
-        validator_pubkey: &PublicKeyBytes,
+        validator_pubkey: PublicKeyBytes,
         validator_committee_position: usize,
         attestation: &mut Attestation<E>,
         current_epoch: Epoch,
-    ) -> Option<()> {
+    ) -> Result<(), Error> {
         // Make sure the target epoch is not higher than the current epoch to avoid potential attacks.
         if attestation.data.target.epoch > current_epoch {
-            return None;
+            return Err(Error::GreaterThanCurrentEpoch {
+                epoch: attestation.data.target.epoch,
+                current_epoch,
+            });
         }
 
         // Checking for slashing conditions.
@@ -260,7 +412,7 @@
             self.genesis_validators_root,
         );
         let slashing_status = self.slashing_protection.check_and_insert_attestation(
-            validator_pubkey,
+            &validator_pubkey,
             &attestation.data,
             domain,
         );
@@ -268,29 +420,20 @@
         match slashing_status {
             // We can safely sign this attestation.
             Ok(Safe::Valid) => {
-                let validators = self.validators.read();
-                let voting_keypair = validators.voting_keypair(validator_pubkey)?;
-
-                attestation
-                    .sign(
-                        &voting_keypair.sk,
+                self.with_validator_keypair(validator_pubkey, |keypair| {
+                    attestation.sign(
+                        &keypair.sk,
                         validator_committee_position,
                         &fork,
                         self.genesis_validators_root,
                         &self.spec,
                     )
-                    .map_err(|e| {
-                        error!(
-                            self.log,
-                            "Error whilst signing attestation";
-                            "error" => format!("{:?}", e)
-                        )
-                    })
-                    .ok()?;
+                })?
+                .map_err(Error::UnableToSignAttestation)?;
 
                 metrics::inc_counter_vec(&metrics::SIGNED_ATTESTATIONS_TOTAL, &[metrics::SUCCESS]);
 
-                Some(())
+                Ok(())
             }
             Ok(Safe::SameData) => {
                 warn!(
@@ -301,7 +444,7 @@
                     &metrics::SIGNED_ATTESTATIONS_TOTAL,
                     &[metrics::SAME_DATA],
                 );
-                None
+                Err(Error::SameData)
             }
             Err(NotSafe::UnregisteredValidator(pk)) => {
                 warn!(
@@ -314,7 +457,7 @@
                     &metrics::SIGNED_ATTESTATIONS_TOTAL,
                     &[metrics::UNREGISTERED],
                 );
-                None
+                Err(Error::Slashable(NotSafe::UnregisteredValidator(pk)))
             }
             Err(e) => {
                 crit!(
@@ -327,7 +470,7 @@
                     &metrics::SIGNED_ATTESTATIONS_TOTAL,
                     &[metrics::SLASHABLE],
                 );
-                None
+                Err(Error::Slashable(e))
             }
         }
     }
@@ -338,46 +481,64 @@
     /// modified by actors other than the signing validator.
     pub fn produce_signed_aggregate_and_proof(
         &self,
-        validator_pubkey: &PublicKeyBytes,
+        validator_pubkey: PublicKeyBytes,
         validator_index: u64,
         aggregate: Attestation<E>,
         selection_proof: SelectionProof,
-    ) -> Option<SignedAggregateAndProof<E>> {
-        let validators = self.validators.read();
-        let voting_keypair = &validators.voting_keypair(validator_pubkey)?;
+    ) -> Result<SignedAggregateAndProof<E>, Error> {
+        // Take the fork early to avoid lock interleaving.
+        let fork = self.fork();
+
+        let proof = self.with_validator_keypair(validator_pubkey, move |keypair| {
+            SignedAggregateAndProof::from_aggregate(
+                validator_index,
+                aggregate,
+                Some(selection_proof),
+                &keypair.sk,
+                &fork,
+                self.genesis_validators_root,
+                &self.spec,
+            )
+        })?;
 
         metrics::inc_counter_vec(&metrics::SIGNED_AGGREGATES_TOTAL, &[metrics::SUCCESS]);
 
-        Some(SignedAggregateAndProof::from_aggregate(
-            validator_index,
-            aggregate,
-            Some(selection_proof),
-            &voting_keypair.sk,
-            &self.fork(),
-            self.genesis_validators_root,
-            &self.spec,
-        ))
+        Ok(proof)
     }
 
-    /// Produces a `SelectionProof` for the `slot`, signed with the secret key corresponding to
+    /// Produces a `SelectionProof` for the `slot`, signed with the secret key corresponding to
     /// `validator_pubkey`.
     pub fn produce_selection_proof(
         &self,
-        validator_pubkey: &PublicKeyBytes,
+        validator_pubkey: PublicKeyBytes,
         slot: Slot,
-    ) -> Option<SelectionProof> {
-        let validators = self.validators.read();
-        let voting_keypair = &validators.voting_keypair(validator_pubkey)?;
-
-        metrics::inc_counter_vec(&metrics::SIGNED_SELECTION_PROOFS_TOTAL, &[metrics::SUCCESS]);
+    ) -> Result<SelectionProof, Error> {
+        // Take the fork early to avoid lock interleaving.
+        let fork = self.fork();
 
-        Some(SelectionProof::new::<E>(
+        // Bypass the `with_validator_keypair` function.
+        //
+        // This is because we don't care about doppelganger protection when it comes to selection
+        // proofs. They are not slashable and we need them to subscribe to subnets on the BN.
+        //
+        // As long as we disallow `SignedAggregateAndProof` then these selection proofs will never
+        // be published on the network.
+        let validators_lock = self.validators.read();
+        let keypair = validators_lock
+            .voting_keypair(&validator_pubkey)
+            .ok_or(Error::UnknownPubkey(validator_pubkey))?;
+
+        let proof = SelectionProof::new::<E>(
             slot,
-            &voting_keypair.sk,
-            &self.fork(),
+            &keypair.sk,
+            &fork,
             self.genesis_validators_root,
             &self.spec,
-        ))
+        );
+
+        metrics::inc_counter_vec(&metrics::SIGNED_SELECTION_PROOFS_TOTAL, &[metrics::SUCCESS]);
+
+        Ok(proof)
     }
 
     /// Prune the slashing protection database so that it remains performant.
@@ -411,10 +572,11 @@
         let new_min_target_epoch = current_epoch.saturating_sub(SLASHING_PROTECTION_HISTORY_EPOCHS);
         let new_min_slot = new_min_target_epoch.start_slot(E::slots_per_epoch());
 
-        let validators = self.validators.read();
+        let all_pubkeys: Vec<_> = self.voting_pubkeys(DoppelgangerStatus::ignored);
+
         if let Err(e) = self
             .slashing_protection
-            .prune_all_signed_attestations(validators.iter_voting_pubkeys(), new_min_target_epoch)
+            .prune_all_signed_attestations(all_pubkeys.iter(), new_min_target_epoch)
         {
             error!(
                 self.log,
@@ -426,7 +588,7 @@
         if let Err(e) = self
             .slashing_protection
-            .prune_all_signed_blocks(validators.iter_voting_pubkeys(), new_min_slot)
+            .prune_all_signed_blocks(all_pubkeys.iter(), new_min_slot)
         {
             error!(
                 self.log,
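The validator-store changes above replace `Option` returns with `Result<_, Error>`, so a caller can distinguish a doppelganger refusal from a slashing refusal. Here is a short, self-contained sketch of how a caller might branch on that error, assuming a pared-down copy of the `Error` enum from this diff; the real call sites live in the block and attestation services, which this does not reproduce.

    // Pared-down copy of the new `Error` enum; only the variants used below.
    #[derive(Debug)]
    enum Error {
        DoppelgangerProtected([u8; 48]),
        SameData,
        Slashable(String), // stands in for the `Slashable(NotSafe)` variant
    }

    // Expected conditions are logged quietly; slashable requests are escalated.
    fn log_signing_failure(err: &Error) {
        match err {
            // Normal while a validator is still in its detection period.
            Error::DoppelgangerProtected(_) => {
                println!("signing suppressed: doppelganger detection in progress")
            }
            // Normal on a duplicate signing request.
            Error::SameData => println!("skipped: previously signed identical data"),
            // Should never happen; treat as critical.
            Error::Slashable(reason) => {
                eprintln!("CRITICAL: refused slashable message: {}", reason)
            }
        }
    }

    fn main() {
        let result: Result<(), Error> = Err(Error::DoppelgangerProtected([0; 48]));
        if let Err(e) = result {
            log_signing_failure(&e);
        }
    }

Returning typed errors rather than `None` also lets each caller decide, per variant, whether a failure should abort the duty or merely be logged.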