From 4b602ca7f2f9b0d1d2adcf64ec26acc7fb182b3d Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Mon, 4 Nov 2024 17:25:09 +0100 Subject: [PATCH 01/28] EIP-4844-pectra (collect blob gas fee) (#12574) See https://github.com/gnosischain/specs/pull/51 --- core/state_transition.go | 9 ++++----- erigon-lib/chain/chain_config.go | 5 ++++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/core/state_transition.go b/core/state_transition.go index 2716b25806f..cd9c59a1adf 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -79,8 +79,6 @@ type StateTransition struct { //some pre-allocated intermediate variables sharedBuyGas *uint256.Int sharedBuyGasBalance *uint256.Int - - isBor bool } // Message represents a message sent to a contract. @@ -127,7 +125,6 @@ func IntrinsicGas(data []byte, accessList types2.AccessList, isContractCreation // NewStateTransition initialises and returns a new state transition object. func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition { - isBor := evm.ChainConfig().Bor != nil return &StateTransition{ gp: gp, evm: evm, @@ -141,8 +138,6 @@ func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition sharedBuyGas: uint256.NewInt(0), sharedBuyGasBalance: uint256.NewInt(0), - - isBor: isBor, } } @@ -481,6 +476,10 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*evmtype if burntContractAddress != nil { burnAmount := new(uint256.Int).Mul(new(uint256.Int).SetUint64(st.gasUsed()), st.evm.Context.BaseFee) st.state.AddBalance(*burntContractAddress, burnAmount, tracing.BalanceChangeUnspecified) + if rules.IsAura && rules.IsPrague { + // https://github.com/gnosischain/specs/blob/master/network-upgrades/pectra.md#eip-4844-pectra + st.state.AddBalance(*burntContractAddress, st.evm.BlobFee, tracing.BalanceChangeUnspecified) + } } } diff --git a/erigon-lib/chain/chain_config.go b/erigon-lib/chain/chain_config.go index 55c97d6f736..96fbfc7666f 100644 --- a/erigon-lib/chain/chain_config.go +++ b/erigon-lib/chain/chain_config.go @@ -75,7 +75,10 @@ type Config struct { TargetBlobGasPerBlock *uint64 `json:"targetBlobGasPerBlock,omitempty"` BlobGasPriceUpdateFraction *uint64 `json:"blobGasPriceUpdateFraction,omitempty"` - // (Optional) governance contract where EIP-1559 fees will be sent to that otherwise would be burnt since the London fork + // (Optional) governance contract where EIP-1559 fees will be sent to, which otherwise would be burnt since the London fork. + // A key corresponds to the block number, starting from which the fees are sent to the address (map value). + // Starting from Prague, EIP-4844 fees might be collected as well: + // see https://github.com/gnosischain/specs/blob/master/network-upgrades/pectra.md#eip-4844-pectra. 
BurntContract map[string]common.Address `json:"burntContract,omitempty"` // (Optional) deposit contract of PoS chains From 2b248cab3fe9df0468ef3d174eca045f4ddfccfd Mon Sep 17 00:00:00 2001 From: Somnath Date: Tue, 5 Nov 2024 00:12:15 +0530 Subject: [PATCH 02/28] workflows: Add ethereum/eest separate workflow (#12599) (#12614) Cherry pick https://github.com/erigontech/erigon/pull/12599 --- .github/workflows/test-hive-eest.yml | 81 ++++++++++++++++++++++++++++ Makefile | 6 ++- 2 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/test-hive-eest.yml diff --git a/.github/workflows/test-hive-eest.yml b/.github/workflows/test-hive-eest.yml new file mode 100644 index 00000000000..9a0a4d4e4ed --- /dev/null +++ b/.github/workflows/test-hive-eest.yml @@ -0,0 +1,81 @@ +name: Hive EEST tests + +on: + push: + branches: + - release/* + - main + workflow_dispatch: + +jobs: + test-hive-eest: + runs-on: ubuntu-latest + steps: + - name: Checkout Hive + uses: actions/checkout@v4 + with: + repository: danceratopz/hive + ref: prague-devnet-4 + path: hive + - name: Setup go env and cache + uses: actions/setup-go@v5 + with: + go-version: '>=1.22' + go-version-file: 'hive/go.mod' + + # Targetting the clients/erigon/Dockerfile.git in the Hive director - + # this builds the container from github and uses it for tests + - name: Get dependencies and build hive + run: | + cd hive + git status + go get . >> buildlogs.log + rm clients/erigon/Dockerfile + mv clients/erigon/Dockerfile.git clients/erigon/Dockerfile + branch_name=$(echo ${GITHUB_REF#refs/heads/} | sed 's/[&/\]/\\&/g') + echo Building Hive with Erigon branch - $branch_name + sed -i "s/^ARG github=ledgerwatch\/erigon$/ARG github=erigontech\/erigon/" clients/erigon/Dockerfile + sed -i "s/^ARG tag=main$/ARG tag=${branch_name}/" clients/erigon/Dockerfile + if [[ "$branch_name" != "main" ]]; then + sed -i "/$sync.parallel-state-flushing/d" clients/erigon/erigon.sh + fi + go build . >> buildlogs.log + # Depends on the last line of hive output that prints the number of suites, tests and failed + # Currently, we fail even if suites and tests are too few, indicating the tests did not run + # We also fail if more than half the tests fail + - name: Run hive tests and parse output + run: | + cd hive + run_suite() { + echo -e "\n\n============================================================" + echo "Running test: ${1}" + echo -e "\n" + ./hive --sim 'ethereum/eest/consume-engine' --client erigon 2>&1 | tee output.log || { + if [ $? 
-gt 0 ]; then + echo "Exitcode gt 0" + fi + } + status_line=$(tail -2 output.log | head -1 | sed -r "s/\x1B\[[0-9;]*[a-zA-Z]//g") + suites=$(echo "$status_line" | sed -n 's/.*suites=\([0-9]*\).*/\1/p') + if [ -z "$suites" ]; then + status_line=$(tail -1 output.log | sed -r "s/\x1B\[[0-9;]*[a-zA-Z]//g") + suites=$(echo "$status_line" | sed -n 's/.*suites=\([0-9]*\).*/\1/p') + fi + tests=$(echo "$status_line" | sed -n 's/.*tests=\([0-9]*\).*/\1/p') + failed=$(echo "$status_line" | sed -n 's/.*failed=\([0-9]*\).*/\1/p') + + echo -e "\n" + echo "----------- Results for ${1} -----------" + echo "Tests: $tests, Failed: $failed" + echo -e "\n\n============================================================" + + if (( tests < 4 )); then + echo "Too few tests run for suite ${1} - ${tests} tests" + exit 1 + fi + if (( failed*2 > tests )); then + echo "Too many failures for suite ${1} - ${failed} failed out of ${tests}" + exit 1 + fi + } + run_suite eest/consume-engine \ No newline at end of file diff --git a/Makefile b/Makefile index f1aa0999f0c..aab922d9e06 100644 --- a/Makefile +++ b/Makefile @@ -185,11 +185,13 @@ test-integration: test-erigon-lib $(GOTEST) --timeout 240m -tags $(BUILD_TAGS),integration ## test-hive run the hive tests locally off nektos/act workflows simulator -test-hive: +test-hive: @if ! command -v act >/dev/null 2>&1; then \ echo "act command not found in PATH, please source it in PATH. If nektosact is not installed, install it by visiting https://nektosact.com/installation/index.html"; \ elif [ -z "$(GITHUB_TOKEN)"]; then \ - echo "Please export GITHUB_TOKEN var in the environment"; \ + echo "Please export GITHUB_TOKEN var in the environment" ; \ + elif [ "$(SUITE)" = "eest" ]; then \ + act -j test-hive-eest -s GITHUB_TOKEN=$(GITHUB_TOKEN) ; \ else \ act -j test-hive -s GITHUB_TOKEN=$(GITHUB_TOKEN) ; \ fi From 685f6f0604aee37c1d05507e0dbae3d6bf78f1d0 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Tue, 5 Nov 2024 16:28:34 +0000 Subject: [PATCH 03/28] heimdall/client: revert retryBackoff hack left behind (#12630) reverts a change from https://github.com/erigontech/erigon/commit/73476abdb0de07654c3a1f9ca6567951e12c2900#diff-ace6e4f24916671d83adb720ebd0fff8e611f09ed451f334e92e10aeb8591525R489-R491 not sure why this has been done - seems like a weird hack that has been left behind maybe unintentionally - it is definitely unnecessary main reason for removing is: - it entirely circumvents the retryBackoff config of the client and always overrides the value which is wrong - it slows down tests a lot - this test should take milliseconds, but after this change it takes a minute ![Screenshot 2024-11-05 at 15 57 08](https://github.com/user-attachments/assets/c8419264-5e73-4e0b-a600-93169b1d12bd) --- polygon/heimdall/client.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/polygon/heimdall/client.go b/polygon/heimdall/client.go index c39062473cd..5cffd3b1997 100644 --- a/polygon/heimdall/client.go +++ b/polygon/heimdall/client.go @@ -486,9 +486,6 @@ func FetchWithRetryEx[T any]( ) (result *T, err error) { attempt := 0 // create a new ticker for retrying the request - if client.retryBackOff < apiHeimdallTimeout { - client.retryBackOff = apiHeimdallTimeout + time.Second*2 - } ticker := time.NewTicker(client.retryBackOff) defer ticker.Stop() From 278466ee3d02060228e89e402049b22ce9e3a4f5 Mon Sep 17 00:00:00 2001 From: Mark Holt <135143369+mh0lt@users.noreply.github.com> Date: Tue, 5 Nov 2024 16:37:09 +0000 Subject: [PATCH 04/28] Re-added missing 
astrid flags to UnwindHeimdall (#12582) This fixes this issue when running main with bor_heimdall stage: ``` [EROR] [10-31|09:13:03.527] Staged Sync err="[3/9 BorHeimdall] unexpected missing first processed block info entry when unwinding" ``` It was caused by a miss-merge after this PR: https://github.com/erigontech/erigon/pull/12148 --------- Co-authored-by: taratorio <94537774+taratorio@users.noreply.github.com> --- polygon/bor/bordb/prune.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polygon/bor/bordb/prune.go b/polygon/bor/bordb/prune.go index b088089cff6..219afe108c8 100644 --- a/polygon/bor/bordb/prune.go +++ b/polygon/bor/bordb/prune.go @@ -103,7 +103,7 @@ func UnwindHeimdall(ctx context.Context, heimdallStore heimdall.Store, bridgeSto } } - if !unwindCfg.KeepEventProcessedBlocks { + if !unwindCfg.KeepEventProcessedBlocks && unwindCfg.Astrid { if err := bridge.UnwindEventProcessedBlocks(tx, unwindPoint); err != nil { return err } From 9b083e9ae7351bc2fa4c97b99d37c6113e8bd520 Mon Sep 17 00:00:00 2001 From: Ilya Mikheev <54912776+JkLondon@users.noreply.github.com> Date: Wed, 6 Nov 2024 01:59:51 +0100 Subject: [PATCH 05/28] [wip] Moved Merge tests to Fuzz (Fuzzed them) (#12632) closes #12126 --------- Co-authored-by: JkLondon --- erigon-lib/state/aggregator_fuzz_test.go | 247 +++++++++++++++++++++++ erigon-lib/state/aggregator_test.go | 207 ------------------- 2 files changed, 247 insertions(+), 207 deletions(-) diff --git a/erigon-lib/state/aggregator_fuzz_test.go b/erigon-lib/state/aggregator_fuzz_test.go index 268479c09f9..37ab28898f0 100644 --- a/erigon-lib/state/aggregator_fuzz_test.go +++ b/erigon-lib/state/aggregator_fuzz_test.go @@ -19,7 +19,19 @@ package state import ( + "context" + "encoding/binary" + "github.com/c2h5oh/datasize" + "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/datadir" + "github.com/erigontech/erigon-lib/common/length" + "github.com/erigontech/erigon-lib/kv" + "github.com/erigontech/erigon-lib/kv/mdbx" + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon-lib/types" + "github.com/holiman/uint256" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -38,3 +50,238 @@ func Fuzz_BtreeIndex_Allocation(f *testing.F) { }) } + +func Fuzz_AggregatorV3_Merge(f *testing.F) { + db, agg := testFuzzDbAndAggregatorv3(f, 10) + rwTx, err := db.BeginRwNosync(context.Background()) + require.NoError(f, err) + defer func() { + if rwTx != nil { + rwTx.Rollback() + } + }() + + ac := agg.BeginFilesRo() + defer ac.Close() + domains, err := NewSharedDomains(WrapTxWithCtx(rwTx, ac), log.New()) + require.NoError(f, err) + defer domains.Close() + + const txs = uint64(1000) + + var ( + commKey1 = []byte("someCommKey") + commKey2 = []byte("otherCommKey") + ) + + // keys are encodings of numbers 1..31 + // each key changes value on every txNum which is multiple of the key + var maxWrite, otherMaxWrite uint64 + //f.Add([]common.Address{common.HexToAddress("0x123"), common.HexToAddress("0x456")}) + //f.Add([]common.Hash{common.HexToHash("0x123"), common.HexToHash("0x456")}) + f.Fuzz(func(t *testing.T, data []byte) { + if len(data) < int(txs*(length.Addr+length.Hash)) { + t.Skip() + } + addrData := data[:txs*length.Addr] + locData := data[txs*length.Addr : txs*(length.Addr+length.Hash)] + addrs := make([]common.Address, 1000) + for i := 0; i < 1000; i++ { + copy(addrs[i][:], addrData[i*length.Addr:(i+1)*length.Addr]) + } + locs := make([]common.Address, 1000) + for i := 0; i < 1000; i++ { + 
copy(locs[i][:], locData[i*length.Hash:(i+1)*length.Hash]) + } + for txNum := uint64(1); txNum <= txs; txNum++ { + domains.SetTxNum(txNum) + + buf := types.EncodeAccountBytesV3(1, uint256.NewInt(0), nil, 0) + err = domains.DomainPut(kv.AccountsDomain, addrs[txNum].Bytes(), nil, buf, nil, 0) + require.NoError(t, err) + + err = domains.DomainPut(kv.StorageDomain, addrs[txNum].Bytes(), locs[txNum].Bytes(), []byte{addrs[txNum].Bytes()[0], locs[txNum].Bytes()[0]}, nil, 0) + require.NoError(t, err) + + var v [8]byte + binary.BigEndian.PutUint64(v[:], txNum) + if txNum%135 == 0 { + pv, step, _, err := ac.GetLatest(kv.CommitmentDomain, commKey2, nil, rwTx) + require.NoError(t, err) + + err = domains.DomainPut(kv.CommitmentDomain, commKey2, nil, v[:], pv, step) + require.NoError(t, err) + otherMaxWrite = txNum + } else { + pv, step, _, err := ac.GetLatest(kv.CommitmentDomain, commKey1, nil, rwTx) + require.NoError(t, err) + + err = domains.DomainPut(kv.CommitmentDomain, commKey1, nil, v[:], pv, step) + require.NoError(t, err) + maxWrite = txNum + } + require.NoError(t, err) + + } + + err = domains.Flush(context.Background(), rwTx) + require.NoError(t, err) + + require.NoError(t, err) + err = rwTx.Commit() + require.NoError(t, err) + rwTx = nil + + err = agg.BuildFiles(txs) + require.NoError(t, err) + + rwTx, err = db.BeginRw(context.Background()) + require.NoError(t, err) + defer rwTx.Rollback() + + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + stat, err := ac.Prune(context.Background(), rwTx, 0, logEvery) + require.NoError(t, err) + t.Logf("Prune: %s", stat) + + err = rwTx.Commit() + require.NoError(t, err) + + err = agg.MergeLoop(context.Background()) + require.NoError(t, err) + + // Check the history + roTx, err := db.BeginRo(context.Background()) + require.NoError(t, err) + defer roTx.Rollback() + + dc := agg.BeginFilesRo() + + v, _, ex, err := dc.GetLatest(kv.CommitmentDomain, commKey1, nil, roTx) + require.NoError(t, err) + require.Truef(t, ex, "key %x not found", commKey1) + + require.EqualValues(t, maxWrite, binary.BigEndian.Uint64(v[:])) + + v, _, ex, err = dc.GetLatest(kv.CommitmentDomain, commKey2, nil, roTx) + require.NoError(t, err) + require.Truef(t, ex, "key %x not found", commKey2) + dc.Close() + + require.EqualValues(t, otherMaxWrite, binary.BigEndian.Uint64(v[:])) + }) + +} + +func Fuzz_AggregatorV3_MergeValTransform(f *testing.F) { + db, agg := testFuzzDbAndAggregatorv3(f, 10) + rwTx, err := db.BeginRwNosync(context.Background()) + require.NoError(f, err) + defer func() { + if rwTx != nil { + rwTx.Rollback() + } + }() + ac := agg.BeginFilesRo() + defer ac.Close() + domains, err := NewSharedDomains(WrapTxWithCtx(rwTx, ac), log.New()) + require.NoError(f, err) + defer domains.Close() + + const txs = uint64(1000) + + agg.commitmentValuesTransform = true + + state := make(map[string][]byte) + + // keys are encodings of numbers 1..31 + // each key changes value on every txNum which is multiple of the key + //var maxWrite, otherMaxWrite uint64 + f.Fuzz(func(t *testing.T, data []byte) { + if len(data) < int(txs*(length.Addr+length.Hash)) { + t.Skip() + } + addrData := data[:txs*length.Addr] + locData := data[txs*length.Addr : txs*(length.Addr+length.Hash)] + addrs := make([]common.Address, 1000) + for i := 0; i < 1000; i++ { + copy(addrs[i][:], addrData[i*length.Addr:(i+1)*length.Addr]) + } + locs := make([]common.Address, 1000) + for i := 0; i < 1000; i++ { + copy(locs[i][:], locData[i*length.Hash:(i+1)*length.Hash]) + } + for txNum := uint64(1); txNum <= txs; 
txNum++ { + domains.SetTxNum(txNum) + + buf := types.EncodeAccountBytesV3(1, uint256.NewInt(txNum*1e6), nil, 0) + err = domains.DomainPut(kv.AccountsDomain, addrs[txNum].Bytes(), nil, buf, nil, 0) + require.NoError(t, err) + + err = domains.DomainPut(kv.StorageDomain, addrs[txNum].Bytes(), locs[txNum].Bytes(), []byte{addrs[txNum].Bytes()[0], locs[txNum].Bytes()[0]}, nil, 0) + require.NoError(t, err) + + if (txNum+1)%agg.StepSize() == 0 { + _, err := domains.ComputeCommitment(context.Background(), true, txNum/10, "") + require.NoError(t, err) + } + + state[string(addrs[txNum].Bytes())] = buf + state[string(addrs[txNum].Bytes())+string(locs[txNum].Bytes())] = []byte{addrs[txNum].Bytes()[0], locs[txNum].Bytes()[0]} + } + + err = domains.Flush(context.Background(), rwTx) + require.NoError(t, err) + + err = rwTx.Commit() + require.NoError(t, err) + rwTx = nil + + err = agg.BuildFiles(txs) + require.NoError(t, err) + + ac.Close() + ac = agg.BeginFilesRo() + defer ac.Close() + + rwTx, err = db.BeginRwNosync(context.Background()) + require.NoError(t, err) + defer func() { + if rwTx != nil { + rwTx.Rollback() + } + }() + + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + stat, err := ac.Prune(context.Background(), rwTx, 0, logEvery) + require.NoError(t, err) + t.Logf("Prune: %s", stat) + + err = rwTx.Commit() + require.NoError(t, err) + + err = agg.MergeLoop(context.Background()) + require.NoError(t, err) + }) +} + +func testFuzzDbAndAggregatorv3(f *testing.F, aggStep uint64) (kv.RwDB, *Aggregator) { + f.Helper() + require := require.New(f) + dirs := datadir.New(f.TempDir()) + logger := log.New() + db := mdbx.NewMDBX(logger).InMem(dirs.Chaindata).GrowthStep(32 * datasize.MB).MapSize(2 * datasize.GB).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.ChaindataTablesCfg + }).MustOpen() + f.Cleanup(db.Close) + + agg, err := NewAggregator(context.Background(), dirs, aggStep, db, logger) + require.NoError(err) + f.Cleanup(agg.Close) + err = agg.OpenFolder() + require.NoError(err) + agg.DisableFsync() + return db, agg +} diff --git a/erigon-lib/state/aggregator_test.go b/erigon-lib/state/aggregator_test.go index 4a8e2a505d2..f99b5f36d87 100644 --- a/erigon-lib/state/aggregator_test.go +++ b/erigon-lib/state/aggregator_test.go @@ -52,213 +52,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestAggregatorV3_Merge(t *testing.T) { - t.Parallel() - db, agg := testDbAndAggregatorv3(t, 10) - rwTx, err := db.BeginRwNosync(context.Background()) - require.NoError(t, err) - defer func() { - if rwTx != nil { - rwTx.Rollback() - } - }() - - ac := agg.BeginFilesRo() - defer ac.Close() - domains, err := NewSharedDomains(WrapTxWithCtx(rwTx, ac), log.New()) - require.NoError(t, err) - defer domains.Close() - - txs := uint64(1000) - rnd := rand.New(rand.NewSource(time.Now().UnixNano())) - - var ( - commKey1 = []byte("someCommKey") - commKey2 = []byte("otherCommKey") - ) - - // keys are encodings of numbers 1..31 - // each key changes value on every txNum which is multiple of the key - var maxWrite, otherMaxWrite uint64 - for txNum := uint64(1); txNum <= txs; txNum++ { - domains.SetTxNum(txNum) - - addr, loc := make([]byte, length.Addr), make([]byte, length.Hash) - - n, err := rnd.Read(addr) - require.NoError(t, err) - require.EqualValues(t, length.Addr, n) - - n, err = rnd.Read(loc) - require.NoError(t, err) - require.EqualValues(t, length.Hash, n) - - buf := types.EncodeAccountBytesV3(1, uint256.NewInt(0), nil, 0) - err = domains.DomainPut(kv.AccountsDomain, addr, 
nil, buf, nil, 0) - require.NoError(t, err) - - err = domains.DomainPut(kv.StorageDomain, addr, loc, []byte{addr[0], loc[0]}, nil, 0) - require.NoError(t, err) - - var v [8]byte - binary.BigEndian.PutUint64(v[:], txNum) - if txNum%135 == 0 { - pv, step, _, err := ac.GetLatest(kv.CommitmentDomain, commKey2, nil, rwTx) - require.NoError(t, err) - - err = domains.DomainPut(kv.CommitmentDomain, commKey2, nil, v[:], pv, step) - require.NoError(t, err) - otherMaxWrite = txNum - } else { - pv, step, _, err := ac.GetLatest(kv.CommitmentDomain, commKey1, nil, rwTx) - require.NoError(t, err) - - err = domains.DomainPut(kv.CommitmentDomain, commKey1, nil, v[:], pv, step) - require.NoError(t, err) - maxWrite = txNum - } - require.NoError(t, err) - - } - - err = domains.Flush(context.Background(), rwTx) - require.NoError(t, err) - - require.NoError(t, err) - err = rwTx.Commit() - require.NoError(t, err) - rwTx = nil - - err = agg.BuildFiles(txs) - require.NoError(t, err) - - rwTx, err = db.BeginRw(context.Background()) - require.NoError(t, err) - defer rwTx.Rollback() - - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() - stat, err := ac.Prune(context.Background(), rwTx, 0, logEvery) - require.NoError(t, err) - t.Logf("Prune: %s", stat) - - err = rwTx.Commit() - require.NoError(t, err) - - err = agg.MergeLoop(context.Background()) - require.NoError(t, err) - - // Check the history - roTx, err := db.BeginRo(context.Background()) - require.NoError(t, err) - defer roTx.Rollback() - - dc := agg.BeginFilesRo() - - v, _, ex, err := dc.GetLatest(kv.CommitmentDomain, commKey1, nil, roTx) - require.NoError(t, err) - require.Truef(t, ex, "key %x not found", commKey1) - - require.EqualValues(t, maxWrite, binary.BigEndian.Uint64(v[:])) - - v, _, ex, err = dc.GetLatest(kv.CommitmentDomain, commKey2, nil, roTx) - require.NoError(t, err) - require.Truef(t, ex, "key %x not found", commKey2) - dc.Close() - - require.EqualValues(t, otherMaxWrite, binary.BigEndian.Uint64(v[:])) -} - -func TestAggregatorV3_MergeValTransform(t *testing.T) { - t.Parallel() - db, agg := testDbAndAggregatorv3(t, 10) - rwTx, err := db.BeginRwNosync(context.Background()) - require.NoError(t, err) - defer func() { - if rwTx != nil { - rwTx.Rollback() - } - }() - ac := agg.BeginFilesRo() - defer ac.Close() - domains, err := NewSharedDomains(WrapTxWithCtx(rwTx, ac), log.New()) - require.NoError(t, err) - defer domains.Close() - - txs := uint64(1000) - rnd := rand.New(rand.NewSource(time.Now().UnixNano())) - - agg.commitmentValuesTransform = true - - state := make(map[string][]byte) - - // keys are encodings of numbers 1..31 - // each key changes value on every txNum which is multiple of the key - //var maxWrite, otherMaxWrite uint64 - for txNum := uint64(1); txNum <= txs; txNum++ { - domains.SetTxNum(txNum) - - addr, loc := make([]byte, length.Addr), make([]byte, length.Hash) - - n, err := rnd.Read(addr) - require.NoError(t, err) - require.EqualValues(t, length.Addr, n) - - n, err = rnd.Read(loc) - require.NoError(t, err) - require.EqualValues(t, length.Hash, n) - - buf := types.EncodeAccountBytesV3(1, uint256.NewInt(txNum*1e6), nil, 0) - err = domains.DomainPut(kv.AccountsDomain, addr, nil, buf, nil, 0) - require.NoError(t, err) - - err = domains.DomainPut(kv.StorageDomain, addr, loc, []byte{addr[0], loc[0]}, nil, 0) - require.NoError(t, err) - - if (txNum+1)%agg.StepSize() == 0 { - _, err := domains.ComputeCommitment(context.Background(), true, txNum/10, "") - require.NoError(t, err) - } - - state[string(addr)] = buf - 
state[string(addr)+string(loc)] = []byte{addr[0], loc[0]} - } - - err = domains.Flush(context.Background(), rwTx) - require.NoError(t, err) - - err = rwTx.Commit() - require.NoError(t, err) - rwTx = nil - - err = agg.BuildFiles(txs) - require.NoError(t, err) - - ac.Close() - ac = agg.BeginFilesRo() - defer ac.Close() - - rwTx, err = db.BeginRwNosync(context.Background()) - require.NoError(t, err) - defer func() { - if rwTx != nil { - rwTx.Rollback() - } - }() - - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() - stat, err := ac.Prune(context.Background(), rwTx, 0, logEvery) - require.NoError(t, err) - t.Logf("Prune: %s", stat) - - err = rwTx.Commit() - require.NoError(t, err) - - err = agg.MergeLoop(context.Background()) - require.NoError(t, err) -} - func TestAggregatorV3_RestartOnDatadir(t *testing.T) { t.Parallel() //t.Skip() From 3c9ffb69c124074e917e85c997a2e9027154a50e Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Wed, 6 Nov 2024 02:49:02 +0100 Subject: [PATCH 06/28] qa-tests: add timeout to rpc test suite (#12624) Sometimes the changes made in the code cause the test to hang for a long time. To avoid delaying the other executions a timeout of 15 min is added (currently the executions take less than 5 minutes). --- .github/workflows/qa-rpc-integration-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index bbdd869281b..c649b64fe42 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -17,6 +17,7 @@ on: jobs: integration-test-suite: runs-on: [ self-hosted, Erigon3 ] + timeout-minutes: 15 env: ERIGON_REFERENCE_DATA_DIR: /opt/erigon-versions/reference-version/datadir ERIGON_TESTBED_DATA_DIR: /opt/erigon-testbed/datadir From 2595385fd0691c20c84659b00e3ea9cedb76b93b Mon Sep 17 00:00:00 2001 From: RealMaxing Date: Wed, 6 Nov 2024 04:05:11 +0200 Subject: [PATCH 07/28] docs: fix crypto.go path (#12612) Fix the `crypto.go` path --- docs/programmers_guide/guide.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/programmers_guide/guide.md b/docs/programmers_guide/guide.md index a21749fdc80..fb1f99f9654 100644 --- a/docs/programmers_guide/guide.md +++ b/docs/programmers_guide/guide.md @@ -65,16 +65,16 @@ Accounts are identified by their addresses. Address is a 20-byte binary string, contract and non-contract accounts. 
For non-contract accounts, the address is derived from the public key, by hashing it and taking lowest 20 bytes of the
-32-byte hash value, as shown in the function `PubkeyToAddress` in the file [crypto/crypto.go](../../crypto/crypto.go)
+32-byte hash value, as shown in the function `PubkeyToAddress` in the file [crypto/crypto.go](../../erigon-lib/crypto/crypto.go)
 
 For smart contract accounts created by a transaction without destination, or by `CREATE` opcode, the address is
 derived from the address and the nonce of the creator, as shown in the function `CreateAddress` in the
-file [crypto/crypto.go](../../crypto/crypto.go)
+file [crypto/crypto.go](../../erigon-lib/crypto/crypto.go)
 
 For smart contract accounts created by `CREATE2` opcode, the address is derived from the creator's address, salt (
 256-bit argument supplied to the `CREATE2` invocation), and the code hash of the initialisation code (code that is
 executed to output the actual, deployed code of the new contract), as shown in the function `CreateAddress2` in the
-file [crypto/crypto.go](../../crypto/crypto.go)
+file [crypto/crypto.go](../../erigon-lib/crypto/crypto.go)
 
 In many places in the code, sets of accounts are represented by mappings from account addresses to the objects
 representing the accounts themselves, for example, field `stateObjects` in the

From 4d577d505286b78593d45119efc339dcdd046629 Mon Sep 17 00:00:00 2001
From: Michelangelo Riccobene
Date: Wed, 6 Nov 2024 03:05:24 +0100
Subject: [PATCH 08/28] qa-tests: change test scheduling (#12613)

This PR re-schedules some tests to run at 08:00 PM UTC, as an attempt to help
team members working in non-EU timezones.

---
 .github/workflows/qa-constrained-tip-tracking.yml | 9 +--------
 .github/workflows/qa-snap-download.yml | 2 +-
 .github/workflows/qa-tip-tracking.yml | 2 +-
 3 files changed, 3 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/qa-constrained-tip-tracking.yml b/.github/workflows/qa-constrained-tip-tracking.yml
index d461a492bfb..658a048343b 100644
--- a/.github/workflows/qa-constrained-tip-tracking.yml
+++ b/.github/workflows/qa-constrained-tip-tracking.yml
@@ -2,15 +2,8 @@ name: QA - Constrained Tip tracking
 
 on:
   schedule:
-    - cron: '0 0 * * 0' # Run on Sunday at 00:00 AM UTC
+    - cron: '0 20 * * 0' # Run on Sunday at 08:00 PM UTC
   workflow_dispatch: # Run manually
-  pull_request:
-    branches:
-      - qa_tests_constrained_tip_tracking
-    types:
-      - opened
-      - synchronize
-      - ready_for_review
 
 jobs:
   constrained-tip-tracking-test:
diff --git a/.github/workflows/qa-snap-download.yml b/.github/workflows/qa-snap-download.yml
index e2fdc0f138a..fa0942406b8 100644
--- a/.github/workflows/qa-snap-download.yml
+++ b/.github/workflows/qa-snap-download.yml
@@ -2,7 +2,7 @@ name: QA - Snapshot Download
 
 on:
   schedule:
-    - cron: '0 22 * * 1-6' # Run every night at 22:00 (10:00 PM) UTC except Sunday
+    - cron: '0 20 * * 1-6' # Run every night at 20:00 (08:00 PM) UTC except Sunday
   workflow_dispatch: # Run manually
 
 jobs:
diff --git a/.github/workflows/qa-tip-tracking.yml b/.github/workflows/qa-tip-tracking.yml
index 77f5fe9b4b5..aba0c4a5026 100644
--- a/.github/workflows/qa-tip-tracking.yml
+++ b/.github/workflows/qa-tip-tracking.yml
@@ -2,7 +2,7 @@ name: QA - Tip tracking
 
 on:
   schedule:
-    - cron: '0 0 * * 1-6' # Run every night at 00:00 AM UTC except Sunday
+    - cron: '0 20 * * 1-6' # Run every night at 08:00 PM UTC except Sunday
   workflow_dispatch: # Run manually
 
 jobs:
From ceae494e73452fae5ed9803deee76bab7869de24 Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Wed, 6 Nov 2024
09:45:16 +0700 Subject: [PATCH 09/28] [wip] test ram impact of domain cache (#12620) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index aab922d9e06..55e035f8362 100644 --- a/Makefile +++ b/Makefile @@ -65,7 +65,7 @@ GO_FLAGS += -ldflags "-X ${PACKAGE}/params.GitCommit=${GIT_COMMIT} -X ${PACKAGE} GOBUILD = ${CPU_ARCH} CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" $(GO) build $(GO_FLAGS) GO_DBG_BUILD = ${CPU_ARCH} CGO_CFLAGS="$(CGO_CFLAGS) -DMDBX_DEBUG=1" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" $(GO) build -tags $(BUILD_TAGS),debug -gcflags=all="-N -l" # see delve docs -GOTEST = ${CPU_ARCH} CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" GODEBUG=cgocheck=0 GOTRACEBACK=1 $(GO) test $(GO_FLAGS) ./... -p 2 +GOTEST = ${CPU_ARCH} CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" GODEBUG=cgocheck=0 GOTRACEBACK=1 $(GO) test $(GO_FLAGS) ./... default: all From 281267514e5c0bac31f05dc3db1bdcb1f0a4d300 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Nov 2024 09:47:07 +0700 Subject: [PATCH 10/28] [rpc-test] check if .yml support comments (#12622) --- .github/workflows/qa-rpc-integration-tests.yml | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index c649b64fe42..1a6e0a0db74 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -120,16 +120,8 @@ jobs: erigon_getLatestLogs/test_12.json,\ erigon_getBalanceChangesInBlock,\ eth_createAccessList/test_16.json,\ - parity_getBlockReceipts/test_01.json,\ - parity_getBlockReceipts/test_02.json,\ - parity_getBlockReceipts/test_03.json,\ - parity_getBlockReceipts/test_04.json,\ - parity_getBlockReceipts/test_05.json,\ - parity_getBlockReceipts/test_06.json,\ - parity_getBlockReceipts/test_07.json,\ - parity_getBlockReceipts/test_08.json,\ - parity_getBlockReceipts/test_09.json,\ - parity_getBlockReceipts/test_10.json,\ +# parity_getBlockReceipts was renamet to eth_getBlockReceipts + parity_getBlockReceipts,\ trace_filter/test_16.json,\ trace_rawTransaction/test_01.json,\ trace_rawTransaction/test_03.json,\ From a5becca9052600473edead3b54e98a2a1aef0d35 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Nov 2024 09:57:17 +0700 Subject: [PATCH 11/28] `state` pkg use `math/rand/v2` in tests (#12619) --- .github/workflows/lint.yml | 2 +- erigon-lib/.golangci.yml | 2 + erigon-lib/state/aggregator_bench_test.go | 21 ++++---- erigon-lib/state/aggregator_fuzz_test.go | 5 +- erigon-lib/state/aggregator_test.go | 31 ++++++------ erigon-lib/state/bps_tree.go | 5 +- erigon-lib/state/domain_shared_bench_test.go | 6 +-- erigon-lib/state/domain_shared_test.go | 7 ++- erigon-lib/state/domain_test.go | 50 +++++++++++--------- erigon-lib/tools/golangci_lint.sh | 2 +- 10 files changed, 68 insertions(+), 63 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 86502cf83d8..3efd756d7e5 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,7 +32,7 @@ jobs: if: runner.os == 'Linux' uses: golangci/golangci-lint-action@v6 with: - version: v1.59.1 + version: v1.61.0 args: --help - name: Lint diff --git a/erigon-lib/.golangci.yml b/erigon-lib/.golangci.yml index d66cd898232..7f4a485356d 100644 --- a/erigon-lib/.golangci.yml +++ b/erigon-lib/.golangci.yml @@ -22,6 +22,7 
@@ linters: - testifylint #TODO: enable me - perfsprint #TODO: enable me - protogetter + - typecheck enable: - unconvert - predeclared @@ -111,6 +112,7 @@ issues: - unused - gocritic - perfsprint + - typecheck - path: hack\.go linters: - gosec diff --git a/erigon-lib/state/aggregator_bench_test.go b/erigon-lib/state/aggregator_bench_test.go index 894563f9eef..089884db80a 100644 --- a/erigon-lib/state/aggregator_bench_test.go +++ b/erigon-lib/state/aggregator_bench_test.go @@ -20,7 +20,6 @@ import ( "bytes" "context" "fmt" - "math/rand" "os" "path" "path/filepath" @@ -109,7 +108,7 @@ func BenchmarkAggregator_Processing(b *testing.B) { } func queueKeys(ctx context.Context, seed, ofSize uint64) <-chan []byte { - rnd := rand.New(rand.NewSource(int64(seed))) + rnd := newRnd(seed) keys := make(chan []byte, 1) go func() { for { @@ -127,10 +126,10 @@ func queueKeys(ctx context.Context, seed, ofSize uint64) <-chan []byte { } func Benchmark_BtreeIndex_Allocation(b *testing.B) { - rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + rnd := newRnd(uint64(time.Now().UnixNano())) for i := 0; i < b.N; i++ { now := time.Now() - count := rnd.Intn(1000000000) + count := rnd.IntN(1000000000) bt := newBtAlloc(uint64(count), uint64(1<<12), true, nil, nil) bt.traverseDfs() fmt.Printf("alloc %v\n", time.Since(now)) @@ -139,7 +138,7 @@ func Benchmark_BtreeIndex_Allocation(b *testing.B) { func Benchmark_BtreeIndex_Search(b *testing.B) { logger := log.New() - rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + rnd := newRnd(uint64(time.Now().UnixNano())) tmp := b.TempDir() defer os.RemoveAll(tmp) dataPath := "../../data/storage.256-288.kv" @@ -159,7 +158,7 @@ func Benchmark_BtreeIndex_Search(b *testing.B) { getter := seg.NewReader(kv.MakeGetter(), comp) for i := 0; i < b.N; i++ { - p := rnd.Intn(len(keys)) + p := rnd.IntN(len(keys)) cur, err := bt.Seek(getter, keys[p]) require.NoErrorf(b, err, "i=%d", i) require.EqualValues(b, keys[p], cur.Key()) @@ -193,12 +192,12 @@ func Benchmark_BTree_Seek(b *testing.B) { M := uint64(1024) compress := seg.CompressNone kv, bt, keys, _ := benchInitBtreeIndex(b, M, compress) - rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + rnd := newRnd(uint64(time.Now().UnixNano())) getter := seg.NewReader(kv.MakeGetter(), compress) b.Run("seek_only", func(b *testing.B) { for i := 0; i < b.N; i++ { - p := rnd.Intn(len(keys)) + p := rnd.IntN(len(keys)) cur, err := bt.Seek(getter, keys[p]) require.NoError(b, err) @@ -209,7 +208,7 @@ func Benchmark_BTree_Seek(b *testing.B) { b.Run("seek_then_next", func(b *testing.B) { for i := 0; i < b.N; i++ { - p := rnd.Intn(len(keys)) + p := rnd.IntN(len(keys)) cur, err := bt.Seek(getter, keys[p]) require.NoError(b, err) @@ -249,7 +248,7 @@ func Benchmark_Recsplit_Find_ExternalFile(b *testing.B) { b.Skip("requires existing KV index file at ../../data/storage.kv") } - rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + rnd := newRnd(uint64(time.Now().UnixNano())) tmp := b.TempDir() defer os.RemoveAll(tmp) @@ -269,7 +268,7 @@ func Benchmark_Recsplit_Find_ExternalFile(b *testing.B) { require.NoError(b, err) for i := 0; i < b.N; i++ { - p := rnd.Intn(len(keys)) + p := rnd.IntN(len(keys)) offset, _ := idxr.Lookup(keys[p]) getter.Reset(offset) diff --git a/erigon-lib/state/aggregator_fuzz_test.go b/erigon-lib/state/aggregator_fuzz_test.go index 37ab28898f0..103bc56a358 100644 --- a/erigon-lib/state/aggregator_fuzz_test.go +++ b/erigon-lib/state/aggregator_fuzz_test.go @@ -21,6 +21,9 @@ package state import ( "context" "encoding/binary" + 
"testing" + "time" + "github.com/c2h5oh/datasize" "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/datadir" @@ -30,8 +33,6 @@ import ( "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon-lib/types" "github.com/holiman/uint256" - "testing" - "time" "github.com/stretchr/testify/require" ) diff --git a/erigon-lib/state/aggregator_test.go b/erigon-lib/state/aggregator_test.go index f99b5f36d87..5b50a93392a 100644 --- a/erigon-lib/state/aggregator_test.go +++ b/erigon-lib/state/aggregator_test.go @@ -22,7 +22,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "github.com/erigontech/erigon-lib/commitment" "math" "math/rand" "os" @@ -33,6 +32,8 @@ import ( "testing" "time" + "github.com/erigontech/erigon-lib/commitment" + "github.com/erigontech/erigon-lib/common/background" "github.com/c2h5oh/datasize" @@ -109,7 +110,7 @@ func aggregatorV3_RestartOnDatadir(t *testing.T, rc runCfg) { defer domains.Close() var latestCommitTxNum uint64 - rnd := rand.New(rand.NewSource(time.Now().Unix())) + rnd := newRnd(0) someKey := []byte("somekey") txs := (aggStep / 2) * 19 @@ -249,7 +250,7 @@ func TestAggregatorV3_PruneSmallBatches(t *testing.T) { maxTx := aggStep * 5 t.Logf("step=%d tx_count=%d\n", aggStep, maxTx) - rnd := rand.New(rand.NewSource(0)) + rnd := newRnd(0) generateSharedDomainsUpdates(t, domains, maxTx, rnd, 20, 10, aggStep/2) @@ -429,7 +430,7 @@ func fillRawdbTxNumsIndexForSharedDomains(t *testing.T, rwTx kv.RwTx, maxTx, com } } -func generateSharedDomainsUpdates(t *testing.T, domains *SharedDomains, maxTxNum uint64, rnd *rand.Rand, keyMaxLen, keysCount, commitEvery uint64) map[string]struct{} { +func generateSharedDomainsUpdates(t *testing.T, domains *SharedDomains, maxTxNum uint64, rnd *rndGen, keyMaxLen, keysCount, commitEvery uint64) map[string]struct{} { t.Helper() usedKeys := make(map[string]struct{}, keysCount*maxTxNum) for txNum := uint64(1); txNum <= maxTxNum; txNum++ { @@ -445,14 +446,14 @@ func generateSharedDomainsUpdates(t *testing.T, domains *SharedDomains, maxTxNum return usedKeys } -func generateSharedDomainsUpdatesForTx(t *testing.T, domains *SharedDomains, txNum uint64, rnd *rand.Rand, prevKeys map[string]struct{}, keyMaxLen, keysCount uint64) map[string]struct{} { +func generateSharedDomainsUpdatesForTx(t *testing.T, domains *SharedDomains, txNum uint64, rnd *rndGen, prevKeys map[string]struct{}, keyMaxLen, keysCount uint64) map[string]struct{} { t.Helper() domains.SetTxNum(txNum) getKey := func() ([]byte, bool) { - r := rnd.Intn(100) + r := rnd.IntN(100) if r < 50 && len(prevKeys) > 0 { - ri := rnd.Intn(len(prevKeys)) + ri := rnd.IntN(len(prevKeys)) for k := range prevKeys { if ri == 0 { return []byte(k), true @@ -471,7 +472,7 @@ func generateSharedDomainsUpdatesForTx(t *testing.T, domains *SharedDomains, txN for j := uint64(0); j < keysCount; j++ { key, existed := getKey() - r := rnd.Intn(101) + r := rnd.IntN(101) switch { case r <= 33: buf := types.EncodeAccountBytesV3(txNum, uint256.NewInt(txNum*100_000), nil, 0) @@ -484,7 +485,7 @@ func generateSharedDomainsUpdatesForTx(t *testing.T, domains *SharedDomains, txN require.NoError(t, err) case r > 33 && r <= 66: - codeUpd := make([]byte, rnd.Intn(24576)) + codeUpd := make([]byte, rnd.IntN(24576)) _, err := rnd.Read(codeUpd) require.NoError(t, err) for limit := 1000; len(key) > length.Addr && limit > 0; limit-- { @@ -569,7 +570,7 @@ func TestAggregatorV3_RestartOnFiles(t *testing.T) { txs := aggStep * 5 t.Logf("step=%d tx_count=%d\n", aggStep, txs) - rnd := 
rand.New(rand.NewSource(0)) + rnd := newRnd(0) keys := make([][]byte, txs) for txNum := uint64(1); txNum <= txs; txNum++ { @@ -708,7 +709,7 @@ func TestAggregatorV3_ReplaceCommittedKeys(t *testing.T) { txs := (aggStep) * StepsInColdFile t.Logf("step=%d tx_count=%d", aggStep, txs) - rnd := rand.New(rand.NewSource(0)) + rnd := newRnd(0) keys := make([][]byte, txs/2) var prev1, prev2 []byte @@ -822,7 +823,7 @@ func pivotKeysFromKV(dataPath string) ([][]byte, error) { func generateKV(tb testing.TB, tmp string, keySize, valueSize, keyCount int, logger log.Logger, compressFlags seg.FileCompression) string { tb.Helper() - rnd := rand.New(rand.NewSource(0)) + rnd := newRnd(0) values := make([]byte, valueSize) dataPath := path.Join(tmp, fmt.Sprintf("%dk.kv", keyCount/1000)) @@ -842,7 +843,7 @@ func generateKV(tb testing.TB, tmp string, keySize, valueSize, keyCount int, log binary.BigEndian.PutUint64(key[keySize-8:], uint64(i)) require.NoError(tb, err) - n, err = rnd.Read(values[:rnd.Intn(valueSize)+1]) + n, err = rnd.Read(values[:rnd.IntN(valueSize)+1]) require.NoError(tb, err) err = collector.Collect(key, values[:n]) @@ -904,7 +905,7 @@ func testDbAndAggregatorv3(t *testing.T, aggStep uint64) (kv.RwDB, *Aggregator) func generateInputData(tb testing.TB, keySize, valueSize, keyCount int) ([][]byte, [][]byte) { tb.Helper() - rnd := rand.New(rand.NewSource(0)) + rnd := newRnd(0) values := make([][]byte, keyCount) keys := make([][]byte, keyCount) @@ -915,7 +916,7 @@ func generateInputData(tb testing.TB, keySize, valueSize, keyCount int) ([][]byt require.NoError(tb, err) keys[i] = common.Copy(bk[:n]) - n, err = rnd.Read(bv[:rnd.Intn(valueSize)+1]) + n, err = rnd.Read(bv[:rnd.IntN(valueSize)+1]) require.NoError(tb, err) values[i] = common.Copy(bv[:n]) diff --git a/erigon-lib/state/bps_tree.go b/erigon-lib/state/bps_tree.go index d039f8ef5dd..188d7723590 100644 --- a/erigon-lib/state/bps_tree.go +++ b/erigon-lib/state/bps_tree.go @@ -25,14 +25,13 @@ import ( "time" "unsafe" - "github.com/erigontech/erigon-lib/common/dbg" - "github.com/c2h5oh/datasize" - "github.com/erigontech/erigon-lib/seg" "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/dbg" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon-lib/recsplit/eliasfano32" + "github.com/erigontech/erigon-lib/seg" ) // nolint diff --git a/erigon-lib/state/domain_shared_bench_test.go b/erigon-lib/state/domain_shared_bench_test.go index 927255bbaba..2c8700e1734 100644 --- a/erigon-lib/state/domain_shared_bench_test.go +++ b/erigon-lib/state/domain_shared_bench_test.go @@ -19,7 +19,6 @@ package state import ( "context" "encoding/binary" - "math/rand" "testing" "github.com/stretchr/testify/require" @@ -46,8 +45,7 @@ func Benchmark_SharedDomains_GetLatest(t *testing.B) { defer domains.Close() maxTx := stepSize * 258 - seed := int64(4500) - rnd := rand.New(rand.NewSource(seed)) + rnd := newRnd(4500) keys := make([][]byte, 8) for i := 0; i < len(keys); i++ { @@ -104,7 +102,7 @@ func Benchmark_SharedDomains_GetLatest(t *testing.B) { for ik := 0; ik < t.N; ik++ { for i := 0; i < len(keys); i++ { - ts := uint64(rnd.Intn(int(maxTx))) + ts := uint64(rnd.IntN(int(maxTx))) v, ok, err := ac2.HistorySeek(kv.AccountsHistory, keys[i], ts, rwTx) require.True(t, ok) diff --git a/erigon-lib/state/domain_shared_test.go b/erigon-lib/state/domain_shared_test.go index d9fcbd20201..17606175321 100644 --- a/erigon-lib/state/domain_shared_test.go +++ b/erigon-lib/state/domain_shared_test.go @@ -20,7 +20,6 @@ import ( "context" 
"encoding/binary" "fmt" - "math/rand" "testing" "time" @@ -53,7 +52,7 @@ func TestSharedDomain_CommitmentKeyReplacement(t *testing.T) { require.NoError(t, err) defer domains.Close() - rnd := rand.New(rand.NewSource(2342)) + rnd := newRnd(2342) maxTx := stepSize * 8 // 1. generate data @@ -134,7 +133,7 @@ func TestSharedDomain_Unwind(t *testing.T) { maxTx := stepSize hashes := make([][]byte, maxTx) count := 10 - rnd := rand.New(rand.NewSource(0)) + rnd := newRnd(0) ac.Close() err = rwTx.Commit() require.NoError(t, err) @@ -180,7 +179,7 @@ Loop: err = domains.Flush(ctx, rwTx) require.NoError(t, err) - unwindTo := uint64(commitStep * rnd.Intn(int(maxTx)/commitStep)) + unwindTo := uint64(commitStep * rnd.IntN(int(maxTx)/commitStep)) domains.currentChangesAccumulator = nil acu := agg.BeginFilesRo() diff --git a/erigon-lib/state/domain_test.go b/erigon-lib/state/domain_test.go index f8f0b8d0ee3..5f29a48b56e 100644 --- a/erigon-lib/state/domain_test.go +++ b/erigon-lib/state/domain_test.go @@ -24,7 +24,8 @@ import ( "fmt" "io/fs" "math" - "math/rand" + randOld "math/rand" + "math/rand/v2" "os" "path/filepath" "sort" @@ -51,6 +52,20 @@ import ( "github.com/erigontech/erigon-lib/types" ) +type rndGen struct { + *rand.Rand + oldGen *randOld.Rand +} + +func newRnd(seed uint64) *rndGen { + return &rndGen{ + Rand: rand.New(rand.NewChaCha8([32]byte{byte(seed)})), + oldGen: randOld.New(randOld.NewSource(int64(seed))), + } +} +func (r *rndGen) IntN(n int) int { return int(r.Uint64N(uint64(n))) } +func (r *rndGen) Read(p []byte) (n int, err error) { return r.oldGen.Read(p) } // seems `go1.22` doesn't have `Read` method on `math/v2` generator + func testDbAndDomain(t *testing.T, logger log.Logger) (kv.RwDB, *Domain) { t.Helper() return testDbAndDomainOfStep(t, 16, logger) @@ -1262,10 +1277,7 @@ func generateTestDataForDomainCommitment(tb testing.TB, keySize1, keySize2, tota tb.Helper() doms := make(map[string]map[string][]upd) - seed := 31 - //seed := time.Now().Unix() - defer tb.Logf("generated data with seed %d, keys %d", seed, keyLimit) - r := rand.New(rand.NewSource(0)) + r := newRnd(31) accs := make(map[string][]upd) stor := make(map[string][]upd) @@ -1293,11 +1305,7 @@ func generateTestData(tb testing.TB, keySize1, keySize2, totalTx, keyTxsLimit, k tb.Helper() data := make(map[string][]upd) - //seed := time.Now().Unix() - seed := 31 - defer tb.Logf("generated data with seed %d, keys %d", seed, keyLimit) - - r := rand.New(rand.NewSource(0)) + r := newRnd(31) if keyLimit == 1 { key1 := generateRandomKey(r, keySize1) data[key1] = generateUpdates(r, totalTx, keyTxsLimit) @@ -1313,24 +1321,24 @@ func generateTestData(tb testing.TB, keySize1, keySize2, totalTx, keyTxsLimit, k return data } -func generateRandomKey(r *rand.Rand, size uint64) string { +func generateRandomKey(r *rndGen, size uint64) string { return string(generateRandomKeyBytes(r, size)) } -func generateRandomKeyBytes(r *rand.Rand, size uint64) []byte { +func generateRandomKeyBytes(r *rndGen, size uint64) []byte { key := make([]byte, size) r.Read(key) return key } -func generateAccountUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd { +func generateAccountUpdates(r *rndGen, totalTx, keyTxsLimit uint64) []upd { updates := make([]upd, 0) usedTxNums := make(map[uint64]bool) for i := uint64(0); i < keyTxsLimit; i++ { txNum := generateRandomTxNum(r, totalTx, usedTxNums) - jitter := r.Intn(10e7) + jitter := r.IntN(10e7) value := types.EncodeAccountBytesV3(i, uint256.NewInt(i*10e4+uint64(jitter)), nil, 0) updates = append(updates, upd{txNum: 
txNum, value: value}) @@ -1341,7 +1349,7 @@ func generateAccountUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd { return updates } -func generateArbitraryValueUpdates(r *rand.Rand, totalTx, keyTxsLimit, maxSize uint64) []upd { +func generateArbitraryValueUpdates(r *rndGen, totalTx, keyTxsLimit, maxSize uint64) []upd { updates := make([]upd, 0) usedTxNums := make(map[uint64]bool) //maxStorageSize := 24 * (1 << 10) // limit on contract code @@ -1349,7 +1357,7 @@ func generateArbitraryValueUpdates(r *rand.Rand, totalTx, keyTxsLimit, maxSize u for i := uint64(0); i < keyTxsLimit; i++ { txNum := generateRandomTxNum(r, totalTx, usedTxNums) - value := make([]byte, r.Intn(int(maxSize))) + value := make([]byte, r.IntN(int(maxSize))) r.Read(value) updates = append(updates, upd{txNum: txNum, value: value}) @@ -1360,7 +1368,7 @@ func generateArbitraryValueUpdates(r *rand.Rand, totalTx, keyTxsLimit, maxSize u return updates } -func generateUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd { +func generateUpdates(r *rndGen, totalTx, keyTxsLimit uint64) []upd { updates := make([]upd, 0) usedTxNums := make(map[uint64]bool) @@ -1377,10 +1385,10 @@ func generateUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd { return updates } -func generateRandomTxNum(r *rand.Rand, maxTxNum uint64, usedTxNums map[uint64]bool) uint64 { - txNum := uint64(r.Intn(int(maxTxNum))) +func generateRandomTxNum(r *rndGen, maxTxNum uint64, usedTxNums map[uint64]bool) uint64 { + txNum := uint64(r.IntN(int(maxTxNum))) for usedTxNums[txNum] { - txNum = uint64(r.Intn(int(maxTxNum))) + txNum = uint64(r.IntN(int(maxTxNum))) } return txNum @@ -1461,8 +1469,6 @@ func TestDomain_CanPruneAfterAggregation(t *testing.T) { aggStep := uint64(25) db, d := testDbAndDomainOfStep(t, aggStep, log.New()) - defer db.Close() - defer d.Close() tx, err := db.BeginRw(context.Background()) require.NoError(t, err) diff --git a/erigon-lib/tools/golangci_lint.sh b/erigon-lib/tools/golangci_lint.sh index ada4234150d..4c812bc72b9 100755 --- a/erigon-lib/tools/golangci_lint.sh +++ b/erigon-lib/tools/golangci_lint.sh @@ -2,7 +2,7 @@ scriptDir=$(dirname "${BASH_SOURCE[0]}") scriptName=$(basename "${BASH_SOURCE[0]}") -version="v1.59.1" +version="v1.60.0" if [[ "$1" == "--install-deps" ]] then From f4ece0d738fd4a2b7be6b0483cf6251084459804 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Nov 2024 10:26:21 +0700 Subject: [PATCH 12/28] seg: add `BinarySearch` method (#12618) --- erigon-lib/seg/decompress.go | 26 +++++++++ erigon-lib/seg/decompress_test.go | 87 ++++++++++++++++++++++++------- 2 files changed, 94 insertions(+), 19 deletions(-) diff --git a/erigon-lib/seg/decompress.go b/erigon-lib/seg/decompress.go index 44b890b064e..ca257a11bae 100644 --- a/erigon-lib/seg/decompress.go +++ b/erigon-lib/seg/decompress.go @@ -23,6 +23,7 @@ import ( "fmt" "os" "path/filepath" + "sort" "strconv" "sync/atomic" "time" @@ -1064,3 +1065,28 @@ func (g *Getter) FastNext(buf []byte) ([]byte, uint64) { g.dataBit = 0 return buf[:wordLen], postLoopPos } + +// BinarySearch - !expecting sorted file - does Seek `g` to key which >= `fromPrefix` by using BinarySearch - means unoptimal and touching many places in file +// use `.Next` to read found +// at `ok = false` leaving `g` in unpredictible state +func (g *Getter) BinarySearch(seek []byte, count int, getOffset func(i uint64) (offset uint64)) (foundOffset uint64, ok bool) { + var key []byte + foundItem := sort.Search(count, func(i int) bool { + offset := getOffset(uint64(i)) + g.Reset(offset) + if 
g.HasNext() { + key, _ = g.Next(key[:0]) + return bytes.Compare(key, seek) >= 0 + } + return false + }) + if foundItem == count { // `Search` returns `n` if not found + return 0, false + } + foundOffset = getOffset(uint64(foundItem)) + g.Reset(foundOffset) + if !g.HasNext() { + return 0, false + } + return foundOffset, true +} diff --git a/erigon-lib/seg/decompress_test.go b/erigon-lib/seg/decompress_test.go index f43f0e08048..9e6ba470323 100644 --- a/erigon-lib/seg/decompress_test.go +++ b/erigon-lib/seg/decompress_test.go @@ -24,6 +24,7 @@ import ( "math/rand" "os" "path/filepath" + "slices" "strings" "testing" "time" @@ -257,22 +258,23 @@ func prepareLoremDictUncompressed(t *testing.T) *Decompressor { cfg.MinPatternScore = 1 cfg.Workers = 2 c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, cfg, log.LvlDebug, logger) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer c.Close() + slices.Sort(loremStrings) for k, w := range loremStrings { - if err = c.AddUncompressedWord([]byte(fmt.Sprintf("%s %d", w, k))); err != nil { - t.Fatal(err) + if len(w) == 0 { + err = c.AddUncompressedWord([]byte(w)) + require.NoError(t, err) + continue } + err = c.AddUncompressedWord([]byte(fmt.Sprintf("%s %d", w, k))) + require.NoError(t, err) } - if err = c.Compress(); err != nil { - t.Fatal(err) - } - var d *Decompressor - if d, err = NewDecompressor(file); err != nil { - t.Fatal(err) - } + err = c.Compress() + require.NoError(t, err) + d, err := NewDecompressor(file) + require.NoError(t, err) + t.Cleanup(d.Close) return d } @@ -281,16 +283,60 @@ func TestUncompressed(t *testing.T) { defer d.Close() g := d.MakeGetter() i := 0 + var offsets []uint64 + offsets = append(offsets, 0) for g.HasNext() { w := loremStrings[i] expected := []byte(fmt.Sprintf("%s %d", w, i+1)) expected = expected[:len(expected)/2] - actual, _ := g.NextUncompressed() + actual, offset := g.NextUncompressed() if bytes.Equal(expected, actual) { t.Errorf("expected %s, actual %s", expected, actual) } i++ - } + offsets = append(offsets, offset) + } + + t.Run("BinarySearch middle", func(t *testing.T) { + require := require.New(t) + _, ok := g.BinarySearch([]byte("ipsum"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] }) + require.True(ok) + k, _ := g.Next(nil) + require.Equal("ipsum 38", string(k)) + _, ok = g.BinarySearch([]byte("ipsu"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] }) + require.True(ok) + k, _ = g.Next(nil) + require.Equal("ipsum 38", string(k)) + }) + t.Run("BinarySearch end of file", func(t *testing.T) { + require := require.New(t) + //last word is `voluptate` + _, ok := g.BinarySearch([]byte("voluptate"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] }) + require.True(ok) + k, _ := g.Next(nil) + require.Equal("voluptate 69", string(k)) + _, ok = g.BinarySearch([]byte("voluptat"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] }) + require.True(ok) + k, _ = g.Next(nil) + require.Equal("voluptate 69", string(k)) + _, ok = g.BinarySearch([]byte("voluptatez"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] }) + require.False(ok) + }) + + t.Run("BinarySearch begin of file", func(t *testing.T) { + require := require.New(t) + //first word is `` + _, ok := g.BinarySearch([]byte(""), d.Count(), func(i uint64) (offset uint64) { return offsets[i] }) + require.True(ok) + k, _ := g.Next(nil) + require.Equal("", string(k)) + + _, ok = g.BinarySearch(nil, d.Count(), func(i uint64) (offset uint64) { return offsets[i] }) + 
require.True(ok) + k, _ = g.Next(nil) + require.Equal("", string(k)) + }) + } func TestDecompressor_OpenCorrupted(t *testing.T) { @@ -461,12 +507,15 @@ func TestDecompressor_OpenCorrupted(t *testing.T) { }) } -const lorem = `Lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et -dolore magna aliqua Ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo -consequat Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur -Excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est laborum` +const lorem = `lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et +dolore magna aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo +consequat duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur +excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est laborum` -var loremStrings = strings.Split(lorem, " ") +var loremStrings = append(strings.Split(rmNewLine(lorem), " "), "") // including emtpy string - to trigger corner cases +func rmNewLine(s string) string { + return strings.ReplaceAll(strings.ReplaceAll(s, "\n", " "), "\r", "") +} func TestDecompressTorrent(t *testing.T) { t.Skip() From 94bc4d3b6606a88c95ac944e1602e6aeb508ab68 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Nov 2024 10:54:47 +0700 Subject: [PATCH 13/28] [wip] tests: reduce domain cache (#12623) --- erigon-lib/state/cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/erigon-lib/state/cache.go b/erigon-lib/state/cache.go index af76f2cd495..0c16e2c41e6 100644 --- a/erigon-lib/state/cache.go +++ b/erigon-lib/state/cache.go @@ -30,7 +30,7 @@ type domainGetFromFileCacheItem struct { } var ( - domainGetFromFileCacheLimit = uint32(dbg.EnvInt("D_LRU", 10_000)) + domainGetFromFileCacheLimit = uint32(dbg.EnvInt("D_LRU", 1_000)) domainGetFromFileCacheTrace = dbg.EnvBool("D_LRU_TRACE", false) domainGetFromFileCacheEnabled = dbg.EnvBool("D_LRU_ENABLED", true) ) From afc45cc228db6336a313321008333fba86e15181 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Nov 2024 12:04:00 +0700 Subject: [PATCH 14/28] [rpc-test] `debug_getModifiedAccountsByNumber` (#12634) --- .../workflows/qa-rpc-integration-tests.yml | 69 +++++-------------- README.md | 26 +++---- cmd/devnet/devnetutils/utils.go | 14 ++-- common/fdlimit/fdlimit_darwin.go | 6 +- common/fdlimit/fdlimit_unix.go | 6 +- common/fdlimit/fdlimit_windows.go | 6 +- consensus/ethash/consensus_test.go | 4 +- core/types/encdec_test.go | 4 +- core/types/transaction_test.go | 4 +- erigon-lib/chain/snapcfg/util.go | 22 +++--- erigon-lib/common/cmp/cmp.go | 10 +-- .../crypto/bn256/cloudflare/gfp_decl.go | 2 +- erigon-lib/downloader/downloader.go | 8 +-- erigon-lib/rlp2/util.go | 2 +- erigon-lib/seg/decompress_test.go | 4 -- erigon-lib/state/inverted_index.go | 6 +- erigon-lib/types/ssz/ssz.go | 20 +++--- eth/stagedsync/exec3_parallel.go | 5 +- eth/stagedsync/stage_snapshots.go | 30 ++++---- p2p/discover/table.go | 4 +- p2p/netutil/iptrack.go | 6 +- polygon/bor/valset/validator_set.go | 14 ++-- tests/fuzzers/difficulty/difficulty-fuzz.go | 12 ++-- turbo/jsonrpc/debug_api.go | 35 ++++------ .../freezeblocks/bor_snapshots.go | 8 +-- turbo/snapshotsync/snapshots.go | 12 ++-- 26 files changed, 146 insertions(+), 193 
deletions(-) diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index 1a6e0a0db74..415d0cdf961 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -87,41 +87,29 @@ jobs: # Run RPC integration test runner via http python3 ./run_tests.py -p 8545 --continue -f --json-diff -x \ - debug_accountRange,debug_getModifiedAccountsByHash,debug_getModifiedAccountsByNumber,debug_storageRangeAt,debug_traceBlockByHash,\ - debug_traceCallMany/test_02.tar,debug_traceCallMany/test_04.tar,debug_traceCallMany/test_05.tar,debug_traceCallMany/test_06.tar,debug_traceCallMany/test_07.tar,debug_traceCallMany/test_09.json,debug_traceCallMany/test_10.tar,\ +# false positives: Erigon return expected response. but rpc-test somehow doesn't see 1 field. + erigon_getHeaderByHash,erigon_getHeaderByNumber,eth_feeHistory,\ +# total difficulty field was removed, then added back + eth_getBlockByHash,eth_getBlockByNumber,\ +# Erigon bugs + debug_accountRange,debug_storageRangeAt,\ +# need update rpc-test - because Erigon is correct (@AskAlexSharov will do after https://github.com/erigontech/erigon/pull/12634) + debug_getModifiedAccountsByHash,debug_getModifiedAccountsByNumber,\ +# Erigon bug https://github.com/erigontech/erigon/issues/12603 + erigon_getLatestLogs,erigon_getLogsByHash/test_04.json,\ +# Erigon bug https://github.com/erigontech/erigon/issues/12637 debug_traceBlockByNumber/test_05.tar,debug_traceBlockByNumber/test_08.tar,debug_traceBlockByNumber/test_09.tar,debug_traceBlockByNumber/test_10.tar,debug_traceBlockByNumber/test_11.tar,debug_traceBlockByNumber/test_12.tar,\ +# remove this line after https://github.com/erigontech/rpc-tests/pull/281 + parity_getBlockReceipts,\ +# to investigate + debug_traceBlockByHash,\ + debug_traceCallMany/test_02.tar,debug_traceCallMany/test_04.tar,debug_traceCallMany/test_05.tar,debug_traceCallMany/test_06.tar,debug_traceCallMany/test_07.tar,debug_traceCallMany/test_09.json,debug_traceCallMany/test_10.tar,\ debug_traceTransaction,\ engine_exchangeCapabilities/test_1.json,\ engine_exchangeTransitionConfigurationV1/test_01.json,\ engine_getClientVersionV1/test_1.json,\ - erigon_getLogsByHash/test_04.json,\ - erigon_getHeaderByHash/test_02.json,\ - erigon_getHeaderByHash/test_03.json,\ - erigon_getHeaderByHash/test_04.json,\ - erigon_getHeaderByHash/test_06.json,\ - erigon_getHeaderByNumber/test_01.json,\ - erigon_getHeaderByNumber/test_02.json,\ - erigon_getHeaderByNumber/test_03.json,\ - erigon_getHeaderByNumber/test_04.json,\ - erigon_getHeaderByNumber/test_05.json,\ - erigon_getHeaderByNumber/test_06.json,\ - erigon_getHeaderByNumber/test_07.json,\ - erigon_getHeaderByNumber/test_08.json,\ - erigon_getLatestLogs/test_01.json,\ - erigon_getLatestLogs/test_02.json,\ - erigon_getLatestLogs/test_03.json,\ - erigon_getLatestLogs/test_04.json,\ - erigon_getLatestLogs/test_05.json,\ - erigon_getLatestLogs/test_06.json,\ - erigon_getLatestLogs/test_08.json,\ - erigon_getLatestLogs/test_09.json,\ - erigon_getLatestLogs/test_10.json,\ - erigon_getLatestLogs/test_11.json,\ - erigon_getLatestLogs/test_12.json,\ erigon_getBalanceChangesInBlock,\ eth_createAccessList/test_16.json,\ -# parity_getBlockReceipts was renamet to eth_getBlockReceipts - parity_getBlockReceipts,\ trace_filter/test_16.json,\ trace_rawTransaction/test_01.json,\ trace_rawTransaction/test_03.json,\ @@ -129,31 +117,6 @@ jobs: admin_peers/test_01.json,\ erigon_nodeInfo/test_1.json,\ 
eth_coinbase/test_01.json,\ - eth_feeHistory/test_01.json,\ - eth_feeHistory/test_02.json,\ - eth_feeHistory/test_03.json,\ - eth_feeHistory/test_04.json,\ - eth_feeHistory/test_05.json,\ - eth_feeHistory/test_06.json,\ - eth_feeHistory/test_08.json,\ - eth_feeHistory/test_09.json,\ - eth_feeHistory/test_10.json,\ - eth_feeHistory/test_11.json,\ - eth_getBlockByHash/test_01.json,\ - eth_getBlockByHash/test_02.json,\ - eth_getBlockByHash/test_05.json,\ - eth_getBlockByHash/test_06.json,\ - eth_getBlockByHash/test_07.json,\ - eth_getBlockByHash/test_08.json,\ - eth_getBlockByNumber/test_01.json,\ - eth_getBlockByNumber/test_02.json,\ - eth_getBlockByNumber/test_04.json,\ - eth_getBlockByNumber/test_05.json,\ - eth_getBlockByNumber/test_06.json,\ - eth_getBlockByNumber/test_07.json,\ - eth_getBlockByNumber/test_08.json,\ - eth_getBlockByNumber/test_12.json,\ - eth_getBlockByNumber/test_13.json,\ eth_getTransactionByHash/test_02.json,\ eth_getWork/test_01.json,\ eth_mining/test_01.json,\ diff --git a/README.md b/README.md index a863e843bb4..64f9ed319c5 100644 --- a/README.md +++ b/README.md @@ -203,18 +203,20 @@ du -hsc /erigon/snapshots/* ### Erigon3 changes from Erigon2 -- Initial sync does download LatestState and it's history - no re-exec from 0 anymore. -- ExecutionStage included many E2 stages: stage_hash_state, stage_trie, log_index, history_index, trace_index -- E3 can execute 1 historical transaction - without executing it's block - because history/indices have - transaction-granularity, instead of block-granularity. -- E3 doesn't store Logs (aka Receipts) - it always re-executing historical txn (but it's cheaper then in E2 - see point - above). -- Restart doesn't loose much partial progress: `--sync.loop.block.limit=5_000` enabled by default -- `chaindata` is less than `15gb`. It's ok to `rm -rf chaindata`. To prevent it's grow: recommend `--batchSize <= 1G` -- can symlink/mount latest state to fast drive and history to cheap drive -- `--internalcl` is enabled by default. to disable use `--externalcl` -- `--prune` flags changed: default `--prune.mode=archive`, FullNode: `--prune.mode=full`, MinimalNode (EIP-4444): - `--prune.mode=minimal`. +- **Initial sync doesn't re-exec from 0:** downloading 99% LatestState and History +- **Per-Transaction granularity of history** (Erigon2 had per-block). Means: + - Can execute 1 historical transaction - without executing it's block + - If account X change V1->V2->V1 within 1 block (different transactions): `debug_getModifiedAccountsByNumber` return + it + - Erigon3 doesn't store Logs (aka Receipts) - it always re-executing historical txn (but it's cheaper) +- **Validator mode**: added. `--internalcl` is enabled by default. to disable use `--externalcl`. +- **Store most of data in immutable files (segments/snapshots):** + - can symlink/mount latest state to fast drive and history to cheap drive + - `chaindata` is less than `15gb`. It's ok to `rm -rf chaindata`. 
(to prevent grow: recommend `--batchSize <= 1G`) +- **`--prune` flags changed**: see `--prune.mode` (default: `archive`, full: `full`, EIP-4444: `minimal`) +- **Other changes:** + - ExecutionStage included many E2 stages: stage_hash_state, stage_trie, log_index, history_index, trace_index + - Restart doesn't loose much partial progress: `--sync.loop.block.limit=5_000` enabled by default ### Logging diff --git a/cmd/devnet/devnetutils/utils.go b/cmd/devnet/devnetutils/utils.go index 4b9e3fe6c97..c88cbce96b2 100644 --- a/cmd/devnet/devnetutils/utils.go +++ b/cmd/devnet/devnetutils/utils.go @@ -117,14 +117,14 @@ func UniqueIDFromEnode(enode string) (string, error) { return enode[:i], nil } -func RandomInt(max int) int { - if max == 0 { +func RandomInt(_max int) int { + if _max == 0 { return 0 } var n uint16 binary.Read(rand.Reader, binary.LittleEndian, &n) - return int(n) % (max + 1) + return int(n) % (_max + 1) } // NamespaceAndSubMethodFromMethod splits a parent method into namespace and the actual method @@ -142,10 +142,10 @@ func GenerateTopic(signature string) []libcommon.Hash { } // RandomNumberInRange returns a random number between min and max NOT inclusive -func RandomNumberInRange(min, max uint64) (uint64, error) { - if max <= min { - return 0, fmt.Errorf("Invalid range: upper bound %d less or equal than lower bound %d", max, min) +func RandomNumberInRange(_min, _max uint64) (uint64, error) { + if _max <= _min { + return 0, fmt.Errorf("Invalid range: upper bound %d less or equal than lower bound %d", _max, _min) } - return uint64(RandomInt(int(max-min)) + int(min)), nil + return uint64(RandomInt(int(_max-_min)) + int(_min)), nil } diff --git a/common/fdlimit/fdlimit_darwin.go b/common/fdlimit/fdlimit_darwin.go index c59be293476..7d8b7f2fd5c 100644 --- a/common/fdlimit/fdlimit_darwin.go +++ b/common/fdlimit/fdlimit_darwin.go @@ -27,7 +27,7 @@ const hardlimit = 10240 // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. // Returns the size it was set to (may differ from the desired 'max') -func Raise(max uint64) (uint64, error) { +func Raise(_max uint64) (uint64, error) { // Get the current limit var limit syscall.Rlimit if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { @@ -35,8 +35,8 @@ func Raise(max uint64) (uint64, error) { } // Try to update the limit to the max allowance limit.Cur = limit.Max - if limit.Cur > max { - limit.Cur = max + if limit.Cur > _max { + limit.Cur = _max } if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { return 0, err diff --git a/common/fdlimit/fdlimit_unix.go b/common/fdlimit/fdlimit_unix.go index 2f3ac908cc8..eebb72fde2e 100644 --- a/common/fdlimit/fdlimit_unix.go +++ b/common/fdlimit/fdlimit_unix.go @@ -26,7 +26,7 @@ import "syscall" // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. 
// Returns the size it was set to (may differ from the desired 'max') -func Raise(max uint64) (uint64, error) { +func Raise(_max uint64) (uint64, error) { // Get the current limit var limit syscall.Rlimit if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { @@ -34,8 +34,8 @@ func Raise(max uint64) (uint64, error) { } // Try to update the limit to the max allowance limit.Cur = limit.Max - if limit.Cur > max { - limit.Cur = max + if limit.Cur > _max { + limit.Cur = _max } if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { return 0, err diff --git a/common/fdlimit/fdlimit_windows.go b/common/fdlimit/fdlimit_windows.go index c7897072626..5a1137050bc 100644 --- a/common/fdlimit/fdlimit_windows.go +++ b/common/fdlimit/fdlimit_windows.go @@ -26,17 +26,17 @@ const hardlimit = 16384 // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. -func Raise(max uint64) (uint64, error) { +func Raise(_max uint64) (uint64, error) { // This method is NOP by design: // * Linux/Darwin counterparts need to manually increase per process limits // * On Windows Go uses the CreateFile API, which is limited to 16K files, non // changeable from within a running process // This way we can always "request" raising the limits, which will either have // or not have effect based on the platform we're running on. - if max > hardlimit { + if _max > hardlimit { return hardlimit, fmt.Errorf("file descriptor limit (%d) reached", hardlimit) } - return max, nil + return _max, nil } // Current retrieves the number of file descriptors allowed to be opened by this diff --git a/consensus/ethash/consensus_test.go b/consensus/ethash/consensus_test.go index 7ac1e5d5dd0..4436a0bfa46 100644 --- a/consensus/ethash/consensus_test.go +++ b/consensus/ethash/consensus_test.go @@ -95,11 +95,11 @@ func TestCalcDifficulty(t *testing.T) { } } -func randSlice(min, max uint32) []byte { +func randSlice(_min, _max uint32) []byte { var b = make([]byte, 4) rand.Read(b) a := binary.LittleEndian.Uint32(b) - size := min + a%(max-min) + size := _min + a%(_max-_min) out := make([]byte, size) rand.Read(out) return out diff --git a/core/types/encdec_test.go b/core/types/encdec_test.go index 69a19c5a0c1..296f5467fec 100644 --- a/core/types/encdec_test.go +++ b/core/types/encdec_test.go @@ -44,8 +44,8 @@ func NewTRand() *TRand { return &TRand{rnd: rand.New(src)} } -func (tr *TRand) RandIntInRange(min, max int) int { - return (tr.rnd.Intn(max-min) + min) +func (tr *TRand) RandIntInRange(_min, _max int) int { + return (tr.rnd.Intn(_max-_min) + _min) } func (tr *TRand) RandUint64() *uint64 { diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index b5aeb0f98b6..fde05e8680e 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -549,8 +549,8 @@ const N = 50 var dummyBlobTxs = [N]*BlobTx{} var dummyBlobWrapperTxs = [N]*BlobTxWrapper{} -func randIntInRange(min, max int) int { - return (rand.Intn(max-min) + min) +func randIntInRange(_min, _max int) int { + return (rand.Intn(_max-_min) + _min) } func randAddr() *libcommon.Address { diff --git a/erigon-lib/chain/snapcfg/util.go b/erigon-lib/chain/snapcfg/util.go index f60de31aaff..6a13bff1e6a 100644 --- a/erigon-lib/chain/snapcfg/util.go +++ b/erigon-lib/chain/snapcfg/util.go @@ -245,7 +245,7 @@ func (p Preverified) Versioned(preferredVersion snaptype.Version, minVersion sna } func (p Preverified) MaxBlock(version snaptype.Version) (uint64, error) { - max := 
uint64(0) + _max := uint64(0) for _, p := range p { _, fileName := filepath.Split(p.Name) ext := filepath.Ext(fileName) @@ -261,16 +261,16 @@ func (p Preverified) MaxBlock(version snaptype.Version) (uint64, error) { return 0, err } - if max < to { - max = to + if _max < to { + _max = to } } - if max == 0 { // to prevent underflow + if _max == 0 { // to prevent underflow return 0, nil } - return max*1_000 - 1, nil + return _max*1_000 - 1, nil } var errWrongVersion = errors.New("wrong version") @@ -464,17 +464,17 @@ func MergeLimitFromCfg(cfg *Cfg, snapType snaptype.Enum, fromBlock uint64) uint6 } func MaxSeedableSegment(chain string, dir string) uint64 { - var max uint64 + var _max uint64 if list, err := snaptype.Segments(dir); err == nil { for _, info := range list { - if Seedable(chain, info) && info.Type.Enum() == snaptype.MinCoreEnum && info.To > max { - max = info.To + if Seedable(chain, info) && info.Type.Enum() == snaptype.MinCoreEnum && info.To > _max { + _max = info.To } } } - return max + return _max } var oldMergeSteps = append([]uint64{snaptype.Erigon2OldMergeLimit}, snaptype.MergeSteps...) @@ -498,14 +498,14 @@ func KnownCfg(networkName string) *Cfg { return newCfg(networkName, c.Typed(knownTypes[networkName])) } -func VersionedCfg(networkName string, preferred snaptype.Version, min snaptype.Version) *Cfg { +func VersionedCfg(networkName string, preferred snaptype.Version, _min snaptype.Version) *Cfg { c, ok := knownPreverified[networkName] if !ok { return newCfg(networkName, Preverified{}) } - return newCfg(networkName, c.Versioned(preferred, min)) + return newCfg(networkName, c.Versioned(preferred, _min)) } var KnownWebseeds = map[string][]string{ diff --git a/erigon-lib/common/cmp/cmp.go b/erigon-lib/common/cmp/cmp.go index 8ee45182c17..db832450987 100644 --- a/erigon-lib/common/cmp/cmp.go +++ b/erigon-lib/common/cmp/cmp.go @@ -21,12 +21,12 @@ import ( ) // InRange - ensure val is in [min,max] range -func InRange[T cmp.Ordered](min, max, val T) T { - if min >= val { - return min +func InRange[T cmp.Ordered](_min, _max, val T) T { + if _min >= val { + return _min } - if max <= val { - return max + if _max <= val { + return _max } return val } diff --git a/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go b/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go index 072e32b0888..23df6f186f4 100644 --- a/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go +++ b/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go @@ -11,7 +11,7 @@ import ( var hasBMI2 = cpu.X86.HasBMI2 -// go:noescape +//go:noescape func gfpNeg(c, a *gfP) //go:noescape diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go index cf89315feb3..6fb5eb1cbfc 100644 --- a/erigon-lib/downloader/downloader.go +++ b/erigon-lib/downloader/downloader.go @@ -174,7 +174,7 @@ func insertCloudflareHeaders(req *http.Request) { // It also tries to parse Retry-After response header when a http.StatusTooManyRequests // (HTTP Code 429) is found in the resp parameter. Hence it will return the number of // seconds the server states it may be ready to process more requests from this client. 
-func calcBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { +func calcBackoff(_min, _max time.Duration, attemptNum int, resp *http.Response) time.Duration { if resp != nil { if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { if s, ok := resp.Header["Retry-After"]; ok { @@ -185,10 +185,10 @@ func calcBackoff(min, max time.Duration, attemptNum int, resp *http.Response) ti } } - mult := math.Pow(2, float64(attemptNum)) * float64(min) + mult := math.Pow(2, float64(attemptNum)) * float64(_min) sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max + if float64(sleep) != mult || sleep > _max { + sleep = _max } return sleep diff --git a/erigon-lib/rlp2/util.go b/erigon-lib/rlp2/util.go index 7cb1b78ed10..c3d1de93d81 100644 --- a/erigon-lib/rlp2/util.go +++ b/erigon-lib/rlp2/util.go @@ -76,7 +76,7 @@ func identifyToken(b byte) Token { return TokenLongBlob case b >= 192 && b <= 247: return TokenShortList - case b >= 248 && b <= 255: + case b >= 248: return TokenLongList } return TokenUnknown diff --git a/erigon-lib/seg/decompress_test.go b/erigon-lib/seg/decompress_test.go index 9e6ba470323..1568b06f4d9 100644 --- a/erigon-lib/seg/decompress_test.go +++ b/erigon-lib/seg/decompress_test.go @@ -566,10 +566,6 @@ func generateRandWords() { WORDS[N-1] = []byte{} } -func randIntInRange(min, max int) int { - return (rand.Intn(max-min) + min) -} - func clearPrevDict() { WORDS = [N][]byte{} WORD_FLAGS = [N]bool{} diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go index fab8c6dd9ea..9146e971b20 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -1323,9 +1323,9 @@ func (it *InvertedIterator1) advanceInFiles() { } if !bytes.Equal(key, it.key) { ef, _ := eliasfano32.ReadEliasFano(val) - min := ef.Get(0) - max := ef.Max() - if min < it.endTxNum && max >= it.startTxNum { // Intersection of [min; max) and [it.startTxNum; it.endTxNum) + _min := ef.Get(0) + _max := ef.Max() + if _min < it.endTxNum && _max >= it.startTxNum { // Intersection of [min; max) and [it.startTxNum; it.endTxNum) it.key = key it.nextFileKey = key return diff --git a/erigon-lib/types/ssz/ssz.go b/erigon-lib/types/ssz/ssz.go index 60800543ae1..40d5ad3a19a 100644 --- a/erigon-lib/types/ssz/ssz.go +++ b/erigon-lib/types/ssz/ssz.go @@ -85,7 +85,7 @@ func UnmarshalUint64SSZ(x []byte) uint64 { return binary.LittleEndian.Uint64(x) } -func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint64, version int) ([]T, error) { +func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, _max uint64, version int) ([]T, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } @@ -96,7 +96,7 @@ func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint6 elementsNum = currentOffset / 4 } inPos := 4 - if uint64(elementsNum) > max { + if uint64(elementsNum) > _max { return nil, ErrTooBigList } objs := make([]T, elementsNum) @@ -121,7 +121,7 @@ func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint6 return objs, nil } -func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement uint32, max uint64, version int) ([]T, error) { +func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement uint32, _max uint64, version int) ([]T, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } @@ -131,7 +131,7 @@ func 
DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement u if uint32(len(buf))%bytesPerElement != 0 { return nil, ErrBufferNotRounded } - if elementsNum > max { + if elementsNum > _max { return nil, ErrTooBigList } objs := make([]T, elementsNum) @@ -144,7 +144,7 @@ func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement u return objs, nil } -func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error) { +func DecodeHashList(bytes []byte, start, end, _max uint32) ([]common.Hash, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } @@ -154,7 +154,7 @@ func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error) if uint32(len(buf))%length.Hash != 0 { return nil, ErrBufferNotRounded } - if elementsNum > max { + if elementsNum > _max { return nil, ErrTooBigList } objs := make([]common.Hash, elementsNum) @@ -164,7 +164,7 @@ func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error) return objs, nil } -func DecodeNumbersList(bytes []byte, start, end uint32, max uint64) ([]uint64, error) { +func DecodeNumbersList(bytes []byte, start, end uint32, _max uint64) ([]uint64, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } @@ -174,7 +174,7 @@ func DecodeNumbersList(bytes []byte, start, end uint32, max uint64) ([]uint64, e if uint64(len(buf))%length.BlockNum != 0 { return nil, ErrBufferNotRounded } - if elementsNum > max { + if elementsNum > _max { return nil, ErrTooBigList } objs := make([]uint64, elementsNum) @@ -195,12 +195,12 @@ func CalculateIndiciesLimit(maxCapacity, numItems, size uint64) uint64 { return numItems } -func DecodeString(bytes []byte, start, end, max uint64) ([]byte, error) { +func DecodeString(bytes []byte, start, end, _max uint64) ([]byte, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } buf := bytes[start:end] - if uint64(len(buf)) > max { + if uint64(len(buf)) > _max { return nil, ErrTooBigList } return buf, nil diff --git a/eth/stagedsync/exec3_parallel.go b/eth/stagedsync/exec3_parallel.go index c65c83c863c..79aa06c6bcb 100644 --- a/eth/stagedsync/exec3_parallel.go +++ b/eth/stagedsync/exec3_parallel.go @@ -4,11 +4,12 @@ import ( "context" "errors" "fmt" - chaos_monkey "github.com/erigontech/erigon/tests/chaos-monkey" "sync" "sync/atomic" "time" + chaos_monkey "github.com/erigontech/erigon/tests/chaos-monkey" + "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" @@ -335,7 +336,7 @@ func (pe *parallelExecutor) rwLoop(ctx context.Context, maxTxNum uint64, logger defer tx.Rollback() pe.doms.SetTx(tx) - applyCtx, cancelApplyCtx = context.WithCancel(ctx) + applyCtx, cancelApplyCtx = context.WithCancel(ctx) //nolint:fatcontext defer cancelApplyCtx() pe.applyLoopWg.Add(1) go pe.applyLoop(applyCtx, maxTxNum, &blockComplete, pe.rwLoopErrCh) diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go index d4a12250055..f8e056623e9 100644 --- a/eth/stagedsync/stage_snapshots.go +++ b/eth/stagedsync/stage_snapshots.go @@ -758,22 +758,22 @@ func (u *snapshotUploader) init(ctx context.Context, logger log.Logger) { } func (u *snapshotUploader) maxUploadedHeader() uint64 { - var max uint64 + var _max uint64 if len(u.files) > 0 { for _, state := range u.files { if state.local && state.remote { if state.info != nil { if state.info.Type.Enum() == coresnaptype.Enums.Headers { - if state.info.To > max { - max = state.info.To + 
if state.info.To > _max { + _max = state.info.To } } } else { if info, _, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, state.file); ok { if info.Type.Enum() == coresnaptype.Enums.Headers { - if info.To > max { - max = info.To + if info.To > _max { + _max = info.To } } state.info = &info @@ -783,7 +783,7 @@ func (u *snapshotUploader) maxUploadedHeader() uint64 { } } - return max + return _max } type dirEntry struct { @@ -1040,25 +1040,25 @@ func (u *snapshotUploader) downloadLatestSnapshots(ctx context.Context, blockNum } } - var min uint64 + var _min uint64 for _, info := range lastSegments { if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok { - if min == 0 || lastInfo.From() < min { - min = lastInfo.From() + if _min == 0 || lastInfo.From() < _min { + _min = lastInfo.From() } } } for segType, info := range lastSegments { if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok { - if lastInfo.From() > min { + if lastInfo.From() > _min { for _, ent := range entries { if info, err := ent.Info(); err == nil { snapInfo, ok := info.Sys().(downloader.SnapInfo) if ok && snapInfo.Type().Enum() == segType && - snapInfo.From() == min { + snapInfo.From() == _min { lastSegments[segType] = info } } @@ -1088,17 +1088,17 @@ func (u *snapshotUploader) maxSeedableHeader() uint64 { } func (u *snapshotUploader) minBlockNumber() uint64 { - var min uint64 + var _min uint64 if list, err := snaptype.Segments(u.cfg.dirs.Snap); err == nil { for _, info := range list { - if u.seedable(info) && min == 0 || info.From < min { - min = info.From + if u.seedable(info) && _min == 0 || info.From < _min { + _min = info.From } } } - return min + return _min } func expandHomeDir(dirpath string) string { diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 3fb9a240d31..330fd7ac484 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -745,8 +745,8 @@ func contains(ns []*node, id enode.ID) bool { } // pushNode adds n to the front of list, keeping at most max items. -func pushNode(list []*node, n *node, max int) ([]*node, *node) { - if len(list) < max { +func pushNode(list []*node, n *node, _max int) ([]*node, *node) { + if len(list) < _max { list = append(list, nil) } removed := list[len(list)-1] diff --git a/p2p/netutil/iptrack.go b/p2p/netutil/iptrack.go index c902bf97dbc..36b3a1df5b9 100644 --- a/p2p/netutil/iptrack.go +++ b/p2p/netutil/iptrack.go @@ -82,15 +82,15 @@ func (it *IPTracker) PredictEndpoint() string { // The current strategy is simple: find the endpoint with most statements. counts := make(map[string]int) - maxcount, max := 0, "" + maxcount, _max := 0, "" for _, s := range it.statements { c := counts[s.endpoint] + 1 counts[s.endpoint] = c if c > maxcount && c >= it.minStatements { - maxcount, max = c, s.endpoint + maxcount, _max = c, s.endpoint } } - return max + return _max } // AddStatement records that a certain host thinks our external endpoint is the one given. 
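The `min`/`max` to `_min`/`_max` renames throughout this patch avoid reusing the names of Go's predeclared `min` and `max` functions (builtins since Go 1.21), which some linters report as shadowing. Below is a minimal sketch of the idea, assuming only the standard Go toolchain; `clamp` and its arguments are hypothetical names used for illustration and are not code from this repository:

    package main

    import "fmt"

    // clamp bounds v to the range [_min, _max]. Naming the parameters
    // `min` and `max` would shadow the Go 1.21 predeclared builtins of
    // the same name inside this function body, which is what the
    // _min/_max convention in this patch avoids.
    func clamp(_min, _max, v int) int {
        if v < _min {
            return _min
        }
        if v > _max {
            return _max
        }
        return v
    }

    func main() {
        fmt.Println(clamp(0, 10, 42))     // prints 10
        fmt.Println(min(3, 7), max(3, 7)) // builtins remain usable: prints 3 7
    }
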
diff --git a/polygon/bor/valset/validator_set.go b/polygon/bor/valset/validator_set.go index 6599c9f8f6c..1cc5e9e7c29 100644 --- a/polygon/bor/valset/validator_set.go +++ b/polygon/bor/valset/validator_set.go @@ -190,20 +190,20 @@ func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 { panic("empty validator set") } - max := int64(math.MinInt64) - min := int64(math.MaxInt64) + _max := int64(math.MinInt64) + _min := int64(math.MaxInt64) for _, v := range vals.Validators { - if v.ProposerPriority < min { - min = v.ProposerPriority + if v.ProposerPriority < _min { + _min = v.ProposerPriority } - if v.ProposerPriority > max { - max = v.ProposerPriority + if v.ProposerPriority > _max { + _max = v.ProposerPriority } } - diff := max - min + diff := _max - _min if diff < 0 { return -1 * diff diff --git a/tests/fuzzers/difficulty/difficulty-fuzz.go b/tests/fuzzers/difficulty/difficulty-fuzz.go index 360d8581bd6..9e7b82d96b5 100644 --- a/tests/fuzzers/difficulty/difficulty-fuzz.go +++ b/tests/fuzzers/difficulty/difficulty-fuzz.go @@ -45,11 +45,11 @@ func (f *fuzzer) read(size int) []byte { return out } -func (f *fuzzer) readSlice(min, max int) []byte { +func (f *fuzzer) readSlice(_min, _max int) []byte { var a uint16 //nolint:errcheck binary.Read(f.input, binary.LittleEndian, &a) - size := min + int(a)%(max-min) + size := _min + int(a)%(_max-_min) out := make([]byte, size) if _, err := f.input.Read(out); err != nil { f.exhausted = true @@ -57,15 +57,15 @@ func (f *fuzzer) readSlice(min, max int) []byte { return out } -func (f *fuzzer) readUint64(min, max uint64) uint64 { - if min == max { - return min +func (f *fuzzer) readUint64(_min, _max uint64) uint64 { + if _min == _max { + return _min } var a uint64 if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil { f.exhausted = true } - a = min + a%(max-min) + a = _min + a%(_max-_min) return a } func (f *fuzzer) readBool() bool { diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index d932b50ccf9..78e96f62f5e 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -21,14 +21,13 @@ import ( "errors" "fmt" - jsoniter "github.com/json-iterator/go" - "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/hexutil" "github.com/erigontech/erigon-lib/common/hexutility" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/order" "github.com/erigontech/erigon-lib/kv/rawdbv3" + jsoniter "github.com/json-iterator/go" "github.com/erigontech/erigon/core/state" "github.com/erigontech/erigon/core/types/accounts" @@ -205,48 +204,40 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByNumber(ctx context.Context, if startNum > endNum { return nil, fmt.Errorf("start block (%d) must be less than or equal to end block (%d)", startNum, endNum) } - //[from, to) startTxNum, err := txNumsReader.Min(tx, startNum) if err != nil { return nil, err } - endTxNum, err := txNumsReader.Max(tx, endNum-1) + endTxNum, err := txNumsReader.Min(tx, endNum) if err != nil { return nil, err } - return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) + return getModifiedAccounts(tx.(kv.TemporalTx), startTxNum, endTxNum) } -// getModifiedAccountsV3 returns a list of addresses that were modified in the block range +// getModifiedAccounts returns a list of addresses that were modified in the block range // [startNum:endNum) -func getModifiedAccountsV3(tx kv.TemporalTx, startTxNum, endTxNum uint64) ([]common.Address, error) { +func getModifiedAccounts(tx kv.TemporalTx, 
startTxNum, endTxNum uint64) ([]common.Address, error) { it, err := tx.HistoryRange(kv.AccountsHistory, int(startTxNum), int(endTxNum), order.Asc, kv.Unlim) if err != nil { return nil, err } defer it.Close() - changedAddrs := make(map[common.Address]struct{}) + var result []common.Address + saw := make(map[common.Address]struct{}) for it.HasNext() { k, _, err := it.Next() if err != nil { return nil, err } - changedAddrs[common.BytesToAddress(k)] = struct{}{} - } - - if len(changedAddrs) == 0 { - return nil, nil - } - - idx := 0 - result := make([]common.Address, len(changedAddrs)) - for addr := range changedAddrs { - copy(result[idx][:], addr[:]) - idx++ + //TODO: data is sorted, enough to compare with prevKey + if _, ok := saw[common.BytesToAddress(k)]; !ok { + saw[common.BytesToAddress(k)] = struct{}{} + result = append(result, common.BytesToAddress(k)) + } } - return result, nil } @@ -294,7 +285,7 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByHash(ctx context.Context, s if err != nil { return nil, err } - return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) + return getModifiedAccounts(tx.(kv.TemporalTx), startTxNum, endTxNum) } func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, address common.Address) (*AccountResult, error) { diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go index 744b6900d23..5b681954f2c 100644 --- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go @@ -146,7 +146,7 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, // this is one off code to fix an issue in 2.49.x->2.52.x which missed // removal of intermediate segments after a merge operation -func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) { +func removeBorOverlaps(dir string, active []snaptype.FileInfo, _max uint64) { list, err := snaptype.Segments(dir) if err != nil { @@ -165,12 +165,12 @@ func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) { // added overhead to make sure we don't delete in the // current 500k block segment - if max > 500_001 { - max -= 500_001 + if _max > 500_001 { + _max -= 500_001 } for _, f := range l { - if max < f.From { + if _max < f.From { continue } diff --git a/turbo/snapshotsync/snapshots.go b/turbo/snapshotsync/snapshots.go index 3fc4d054cc3..da86afbc070 100644 --- a/turbo/snapshotsync/snapshots.go +++ b/turbo/snapshotsync/snapshots.go @@ -838,7 +838,7 @@ func (s *RoSnapshots) dirtyIdxAvailability(segtype snaptype.Enum) uint64 { return 0 } - var max uint64 + var _max uint64 dirty.Walk(func(segments []*DirtySegment) bool { for _, seg := range segments { @@ -846,30 +846,30 @@ func (s *RoSnapshots) dirtyIdxAvailability(segtype snaptype.Enum) uint64 { break } - max = seg.to - 1 + _max = seg.to - 1 } return true }) - return max + return _max } func (s *RoSnapshots) visibleIdxAvailability(segtype snaptype.Enum) uint64 { tx := s.ViewType(segtype.Type()) defer tx.Close() - var max uint64 + var _max uint64 for _, seg := range tx.Segments { if !seg.IsIndexed() { break } - max = seg.to - 1 + _max = seg.to - 1 } - return max + return _max } func (s *RoSnapshots) Ls() { From 9d270795ed86ed7b57275cb8f290d60b65e72ff4 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 6 Nov 2024 07:52:28 +0100 Subject: [PATCH 15/28] Minimally lock the state during aggregate processing + memory tuning (#12571) - Changed the positions of 
lock release in aggregate and proof - Removed diffset because it was taking way too much memory and caused way too many allocs - also it is a data structure only used for some debug endpoints so not worth it - Removed compressor for caches (most of those are random 32-bytes hashes anyway) --- .../fork_graph/diff_storage/diff_storage.go | 131 ------------------ .../diff_storage/diff_storage_test.go | 100 ------------- .../forkchoice/fork_graph/fork_graph_disk.go | 92 +++--------- .../fork_graph/fork_graph_disk_fs.go | 20 +-- 4 files changed, 27 insertions(+), 316 deletions(-) delete mode 100644 cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage.go delete mode 100644 cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage_test.go diff --git a/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage.go b/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage.go deleted file mode 100644 index 330c758e014..00000000000 --- a/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . - -package diffstorage - -import ( - "bytes" - "io" - "sync" - - "github.com/alecthomas/atomic" - libcommon "github.com/erigontech/erigon-lib/common" -) - -const maxDumps = 8 // max number of dumps to keep in memory to prevent from memory leak during long non-finality. - -var bufferPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -type link struct { - from libcommon.Hash - to libcommon.Hash -} - -// Memory storage for binary diffs -type ChainDiffStorage struct { - dumps sync.Map - parent sync.Map // maps child -> parent - links sync.Map // maps root -> []links - diffFn func(w io.Writer, old, new []byte) error - applyFn func(in, out []byte, diff []byte, reverse bool) ([]byte, error) - diffs sync.Map - dumpsCount atomic.Int32 // prevent from memory leak during long non-finality. 
-} - -func NewChainDiffStorage(diffFn func(w io.Writer, old, new []byte) error, applyFn func(in, out []byte, diff []byte, reverse bool) ([]byte, error)) *ChainDiffStorage { - return &ChainDiffStorage{ - diffFn: diffFn, - applyFn: applyFn, - dumpsCount: atomic.NewInt32(0), - } -} - -func (c *ChainDiffStorage) Insert(root, parent libcommon.Hash, prevDump, dump []byte, isDump bool) error { - c.parent.Store(root, parent) - if isDump { - c.dumpsCount.Add(1) - if c.dumpsCount.Load() > maxDumps { - *c = *NewChainDiffStorage(c.diffFn, c.applyFn) - c.dumpsCount.Store(0) - return nil - } - c.dumps.Store(root, libcommon.Copy(dump)) - return nil - } - - buf := bufferPool.Get().(*bytes.Buffer) - defer bufferPool.Put(buf) - buf.Reset() - - if err := c.diffFn(buf, prevDump, dump); err != nil { - return err - } - c.diffs.Store(link{from: parent, to: root}, libcommon.Copy(buf.Bytes())) - - links, _ := c.links.LoadOrStore(parent, []link{}) - c.links.Store(parent, append(links.([]link), link{from: parent, to: root})) - - return nil -} - -func (c *ChainDiffStorage) Get(root libcommon.Hash) ([]byte, error) { - dump, foundDump := c.dumps.Load(root) - if foundDump { - return dump.([]byte), nil - } - currentRoot := root - diffs := [][]byte{} - for !foundDump { - parent, found := c.parent.Load(currentRoot) - if !found { - return nil, nil - } - diff, foundDiff := c.diffs.Load(link{from: parent.(libcommon.Hash), to: currentRoot}) - if !foundDiff { - return nil, nil - } - diffs = append(diffs, diff.([]byte)) - currentRoot = parent.(libcommon.Hash) - dump, foundDump = c.dumps.Load(currentRoot) - } - out := libcommon.Copy(dump.([]byte)) - for i := len(diffs) - 1; i >= 0; i-- { - var err error - out, err = c.applyFn(out, out, diffs[i], false) - if err != nil { - return nil, err - } - } - return out, nil -} - -func (c *ChainDiffStorage) Delete(root libcommon.Hash) { - if _, loaded := c.dumps.LoadAndDelete(root); loaded { - c.dumpsCount.Add(-1) - } - c.parent.Delete(root) - links, ok := c.links.Load(root) - if ok { - for _, link := range links.([]link) { - c.diffs.Delete(link) - } - } - c.links.Delete(root) -} diff --git a/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage_test.go b/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage_test.go deleted file mode 100644 index e4a6835dcd0..00000000000 --- a/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . 
- -package diffstorage - -import ( - "math" - "testing" - - libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon/cl/cltypes/solid" - "github.com/erigontech/erigon/cl/persistence/base_encoding" - "github.com/stretchr/testify/require" -) - -// 1 -> 2 -> 3 -> 4 -> 5 -// -// | -// --> 6 -func TestDiffStorage(t *testing.T) { - // decleare 5 nodes - node1 := libcommon.Hash{1} - node2 := libcommon.Hash{2} - node3 := libcommon.Hash{3} - node4 := libcommon.Hash{4} - node5 := libcommon.Hash{5} - node6 := libcommon.Hash{6} - - node1Content := []uint64{1, 2, 3, 4, 5} - node2Content := []uint64{1, 2, 3, 4, 5, 6} - node3Content := []uint64{1, 2, 3, 4, 5, 2, 7} - node4Content := []uint64{1, 2, 3, 4, 5, 2, 7, 8} - node5Content := []uint64{1, 6, 8, 4, 5, 2, 7, 8, 9} - node6Content := []uint64{1, 2, 3, 4, 5, 2, 7, 10} - - exp1 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node1Content) - exp2 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node2Content) - exp3 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node3Content) - exp4 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node4Content) - exp5 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node5Content) - exp6 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node6Content) - - enc1, err := exp1.EncodeSSZ(nil) - require.NoError(t, err) - enc2, err := exp2.EncodeSSZ(nil) - require.NoError(t, err) - enc3, err := exp3.EncodeSSZ(nil) - require.NoError(t, err) - enc4, err := exp4.EncodeSSZ(nil) - require.NoError(t, err) - enc5, err := exp5.EncodeSSZ(nil) - require.NoError(t, err) - enc6, err := exp6.EncodeSSZ(nil) - require.NoError(t, err) - - diffStorage := NewChainDiffStorage(base_encoding.ComputeCompressedSerializedUint64ListDiff, base_encoding.ApplyCompressedSerializedUint64ListDiff) - diffStorage.Insert(node1, libcommon.Hash{}, nil, enc1, true) - diffStorage.Insert(node2, node1, enc1, enc2, false) - diffStorage.Insert(node3, node2, enc2, enc3, false) - diffStorage.Insert(node4, node3, enc3, enc4, false) - diffStorage.Insert(node5, node4, enc4, enc5, false) - diffStorage.Insert(node6, node2, enc2, enc6, false) - - d1, err := diffStorage.Get(node1) - require.NoError(t, err) - require.Equal(t, enc1, d1) - - d2, err := diffStorage.Get(node2) - require.NoError(t, err) - require.Equal(t, enc2, d2) - - d3, err := diffStorage.Get(node3) - require.NoError(t, err) - require.Equal(t, enc3, d3) - - d4, err := diffStorage.Get(node4) - require.NoError(t, err) - require.Equal(t, enc4, d4) - - d5, err := diffStorage.Get(node5) - require.NoError(t, err) - require.Equal(t, enc5, d5) - - d6, err := diffStorage.Get(node6) - require.NoError(t, err) - require.Equal(t, enc6, d6) -} diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go index bcef69833ed..3068031bebb 100644 --- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go +++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go @@ -23,7 +23,6 @@ import ( "sync" "sync/atomic" - "github.com/klauspost/compress/zstd" "github.com/spf13/afero" libcommon "github.com/erigontech/erigon-lib/common" @@ -34,9 +33,7 @@ import ( "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/lightclient_utils" "github.com/erigontech/erigon/cl/cltypes/solid" - "github.com/erigontech/erigon/cl/persistence/base_encoding" "github.com/erigontech/erigon/cl/phase1/core/state" - diffstorage "github.com/erigontech/erigon/cl/phase1/forkchoice/fork_graph/diff_storage" "github.com/erigontech/erigon/cl/transition" 
"github.com/erigontech/erigon/cl/transition/impl/eth2" ) @@ -48,26 +45,6 @@ type syncCommittees struct { nextSyncCommittee *solid.SyncCommittee } -var compressorPool = sync.Pool{ - New: func() interface{} { - w, err := zstd.NewWriter(nil) - if err != nil { - panic(err) - } - return w - }, -} - -var decompressPool = sync.Pool{ - New: func() interface{} { - r, err := zstd.NewReader(nil) - if err != nil { - panic(err) - } - return r - }, -} - var ErrStateNotFound = errors.New("state not found") type ChainSegmentInsertionResult uint @@ -132,12 +109,9 @@ type forkGraphDisk struct { // for each block root we keep track of the sync committees for head retrieval. syncCommittees sync.Map lightclientBootstraps sync.Map - // diffs storage - balancesStorage *diffstorage.ChainDiffStorage - validatorSetStorage *diffstorage.ChainDiffStorage - inactivityScoresStorage *diffstorage.ChainDiffStorage - previousIndicies sync.Map - currentIndicies sync.Map + + previousIndicies sync.Map + currentIndicies sync.Map // configurations beaconCfg *clparams.BeaconChainConfig @@ -172,23 +146,16 @@ func NewForkGraphDisk(anchorState *state.CachingBeaconState, aferoFs afero.Fs, r farthestExtendingPath[anchorRoot] = true - balancesStorage := diffstorage.NewChainDiffStorage(base_encoding.ComputeCompressedSerializedUint64ListDiff, base_encoding.ApplyCompressedSerializedUint64ListDiff) - validatorSetStorage := diffstorage.NewChainDiffStorage(base_encoding.ComputeCompressedSerializedValidatorSetListDiff, base_encoding.ApplyCompressedSerializedValidatorListDiff) - inactivityScoresStorage := diffstorage.NewChainDiffStorage(base_encoding.ComputeCompressedSerializedUint64ListDiff, base_encoding.ApplyCompressedSerializedUint64ListDiff) - f := &forkGraphDisk{ fs: aferoFs, // current state data currentState: anchorState, // configuration - beaconCfg: anchorState.BeaconConfig(), - genesisTime: anchorState.GenesisTime(), - anchorSlot: anchorState.Slot(), - balancesStorage: balancesStorage, - validatorSetStorage: validatorSetStorage, - inactivityScoresStorage: inactivityScoresStorage, - rcfg: rcfg, - emitter: emitter, + beaconCfg: anchorState.BeaconConfig(), + genesisTime: anchorState.GenesisTime(), + anchorSlot: anchorState.Slot(), + rcfg: rcfg, + emitter: emitter, } f.lowestAvailableBlock.Store(anchorState.Slot()) f.headers.Store(libcommon.Hash(anchorRoot), &anchorHeader) @@ -280,13 +247,7 @@ func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, } blockRewardsCollector := ð2.BlockRewardsCollector{} - var prevDumpBalances, prevValidatorSetDump, prevInactivityScores []byte - epochCross := newState.Slot()/f.beaconCfg.SlotsPerEpoch != block.Slot/f.beaconCfg.SlotsPerEpoch - if (f.rcfg.Beacon || f.rcfg.Validator || f.rcfg.Lighthouse) && !epochCross { - prevDumpBalances = libcommon.Copy(newState.RawBalances()) - prevValidatorSetDump = libcommon.Copy(newState.RawValidatorSet()) - prevInactivityScores = libcommon.Copy(newState.RawInactivityScores()) - } + // Execute the state if invalidBlockErr := transition.TransitionState(newState, signedBlock, blockRewardsCollector, fullValidation); invalidBlockErr != nil { // Add block to list of invalid blocks @@ -302,11 +263,9 @@ func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, if block.Version() != clparams.Phase0Version { f.currentIndicies.Store(libcommon.Hash(blockRoot), libcommon.Copy(newState.RawCurrentEpochParticipation())) f.previousIndicies.Store(libcommon.Hash(blockRoot), libcommon.Copy(newState.RawPreviousEpochParticipation())) - 
f.inactivityScoresStorage.Insert(libcommon.Hash(blockRoot), block.ParentRoot, prevInactivityScores, newState.RawInactivityScores(), epochCross) } f.blockRewards.Store(libcommon.Hash(blockRoot), blockRewardsCollector) - f.balancesStorage.Insert(libcommon.Hash(blockRoot), block.ParentRoot, prevDumpBalances, newState.RawBalances(), epochCross) - f.validatorSetStorage.Insert(libcommon.Hash(blockRoot), block.ParentRoot, prevValidatorSetDump, newState.RawValidatorSet(), epochCross) + period := f.beaconCfg.SyncCommitteePeriod(newState.Slot()) f.syncCommittees.Store(period, syncCommittees{ currentSyncCommittee: newState.CurrentSyncCommittee().Copy(), @@ -474,9 +433,7 @@ func (f *forkGraphDisk) Prune(pruneSlot uint64) (err error) { f.blockRewards.Delete(root) f.fs.Remove(getBeaconStateFilename(root)) f.fs.Remove(getBeaconStateCacheFilename(root)) - f.balancesStorage.Delete(root) - f.validatorSetStorage.Delete(root) - f.inactivityScoresStorage.Delete(root) + f.previousIndicies.Delete(root) f.currentIndicies.Delete(root) } @@ -529,27 +486,25 @@ func (f *forkGraphDisk) GetLightClientUpdate(period uint64) (*cltypes.LightClien } func (f *forkGraphDisk) GetBalances(blockRoot libcommon.Hash) (solid.Uint64ListSSZ, error) { - b, err := f.balancesStorage.Get(blockRoot) + st, err := f.GetState(blockRoot, true) if err != nil { return nil, err } - if len(b) == 0 { - return nil, nil + if st == nil { + return nil, ErrStateNotFound } - out := solid.NewUint64ListSSZ(int(f.beaconCfg.ValidatorRegistryLimit)) - return out, out.DecodeSSZ(b, 0) + return st.Balances(), nil } func (f *forkGraphDisk) GetInactivitiesScores(blockRoot libcommon.Hash) (solid.Uint64ListSSZ, error) { - b, err := f.inactivityScoresStorage.Get(blockRoot) + st, err := f.GetState(blockRoot, true) if err != nil { return nil, err } - if len(b) == 0 { - return nil, nil + if st == nil { + return nil, ErrStateNotFound } - out := solid.NewUint64ListSSZ(int(f.beaconCfg.ValidatorRegistryLimit)) - return out, out.DecodeSSZ(b, 0) + return st.InactivityScores(), nil } func (f *forkGraphDisk) GetPreviousParticipationIndicies(blockRoot libcommon.Hash) (*solid.ParticipationBitList, error) { @@ -577,13 +532,12 @@ func (f *forkGraphDisk) GetCurrentParticipationIndicies(blockRoot libcommon.Hash } func (f *forkGraphDisk) GetValidatorSet(blockRoot libcommon.Hash) (*solid.ValidatorSet, error) { - b, err := f.validatorSetStorage.Get(blockRoot) + st, err := f.GetState(blockRoot, true) if err != nil { return nil, err } - if len(b) == 0 { - return nil, nil + if st == nil { + return nil, ErrStateNotFound } - out := solid.NewValidatorSet(int(f.beaconCfg.ValidatorRegistryLimit)) - return out, out.DecodeSSZ(b, 0) + return st.ValidatorSet(), nil } diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go index 902426d7801..11a8bc001d1 100644 --- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go +++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go @@ -24,7 +24,6 @@ import ( "os" "github.com/golang/snappy" - "github.com/klauspost/compress/zstd" "github.com/spf13/afero" libcommon "github.com/erigontech/erigon-lib/common" @@ -94,12 +93,7 @@ func (f *forkGraphDisk) readBeaconStateFromDisk(blockRoot libcommon.Hash) (bs *s } defer cacheFile.Close() - reader := decompressPool.Get().(*zstd.Decoder) - defer decompressPool.Put(reader) - - reader.Reset(cacheFile) - - if err := bs.DecodeCaches(reader); err != nil { + if err := bs.DecodeCaches(cacheFile); err != nil { return nil, err } @@ -162,19 +156,13 @@ func (f 
*forkGraphDisk) DumpBeaconStateOnDisk(blockRoot libcommon.Hash, bs *stat } defer cacheFile.Close() - writer := compressorPool.Get().(*zstd.Encoder) - defer compressorPool.Put(writer) - - writer.Reset(cacheFile) - defer writer.Close() - - if err := bs.EncodeCaches(writer); err != nil { + if err := bs.EncodeCaches(cacheFile); err != nil { return err } - if err = writer.Close(); err != nil { + + if err = cacheFile.Sync(); err != nil { return } - err = cacheFile.Sync() return } From 5f65bb8418e15f787b31fdb34cd5e8500c741912 Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Wed, 6 Nov 2024 07:03:52 +0000 Subject: [PATCH 16/28] Strongerrunner (#12602) --- .github/workflows/test-integration.yml | 3 ++- tests/block_test.go | 1 + tests/difficulty_test.go | 2 -- tests/exec_spec_test.go | 1 + tests/init_test.go | 2 -- tests/rlp_test.go | 1 - tests/state_test.go | 2 +- tests/transaction_test.go | 3 +-- 8 files changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index c7462a8d262..f47c4b0dcaa 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -24,6 +24,7 @@ jobs: os: - ubuntu-22.04 - macos-14 + - ubuntu-latest-erigontests-large runs-on: ${{ matrix.os }} steps: @@ -37,7 +38,7 @@ jobs: run: sudo apt update && sudo apt install build-essential - name: test-integration - run: make test-integration + run: GOGC=50 make test-integration tests-windows: strategy: diff --git a/tests/block_test.go b/tests/block_test.go index 269dde4ab78..3003fd707a0 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -53,6 +53,7 @@ func TestBlockchain(t *testing.T) { checkStateRoot := true bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { + t.Parallel() // import pre accounts & construct test genesis block & state root if err := bt.checkFailure(t, test.Run(t, checkStateRoot)); err != nil { t.Error(err) diff --git a/tests/difficulty_test.go b/tests/difficulty_test.go index daab9e53e8e..c301d94ff70 100644 --- a/tests/difficulty_test.go +++ b/tests/difficulty_test.go @@ -28,8 +28,6 @@ import ( ) func TestDifficulty(t *testing.T) { - //t.Parallel() - dt := new(testMatcher) dt.walk(t, difficultyTestDir, func(t *testing.T, name string, superTest map[string]json.RawMessage) { diff --git a/tests/exec_spec_test.go b/tests/exec_spec_test.go index d8ea375f79a..e93e660f97b 100644 --- a/tests/exec_spec_test.go +++ b/tests/exec_spec_test.go @@ -35,6 +35,7 @@ func TestExecutionSpec(t *testing.T) { checkStateRoot := true bt.walk(t, dir, func(t *testing.T, name string, test *BlockTest) { + t.Parallel() // import pre accounts & construct test genesis block & state root if err := bt.checkFailure(t, test.Run(t, checkStateRoot)); err != nil { t.Error(err) diff --git a/tests/init_test.go b/tests/init_test.go index a3a28f110f1..27bbcda2ea1 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -228,7 +228,6 @@ func (tm *testMatcher) runTestFile(t *testing.T, path, name string, runTest inte t.Skip("Skipped by whitelist") } } - //t.Parallel() // Load the file as map[string]. 
m := makeMapFromTestFunc(runTest) @@ -289,7 +288,6 @@ func runTestFunc(runTest interface{}, t *testing.T, name string, m reflect.Value } func TestMatcherWhitelist(t *testing.T) { - //t.Parallel() tm := new(testMatcher) tm.whitelist("invalid*") tm.walk(t, rlpTestDir, func(t *testing.T, name string, test *RLPTest) { diff --git a/tests/rlp_test.go b/tests/rlp_test.go index f6a907b2ade..25abe33f7e6 100644 --- a/tests/rlp_test.go +++ b/tests/rlp_test.go @@ -26,7 +26,6 @@ import ( ) func TestRLP(t *testing.T) { - //t.Parallel() tm := new(testMatcher) tm.walk(t, rlpTestDir, func(t *testing.T, name string, test *RLPTest) { if err := tm.checkFailure(t, test.Run()); err != nil { diff --git a/tests/state_test.go b/tests/state_test.go index 7a5f9b93ddb..7199c444aac 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -39,12 +39,12 @@ import ( ) func TestState(t *testing.T) { + t.Parallel() defer log.Root().SetHandler(log.Root().GetHandler()) log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StderrHandler)) if runtime.GOOS == "windows" { t.Skip("fix me on win please") // it's too slow on win and stops on macos, need generally improve speed of this tests } - //t.Parallel() st := new(testMatcher) diff --git a/tests/transaction_test.go b/tests/transaction_test.go index 1b3ffd32837..af2b25d0a7b 100644 --- a/tests/transaction_test.go +++ b/tests/transaction_test.go @@ -28,8 +28,6 @@ import ( ) func TestTransaction(t *testing.T) { - //t.Parallel() - txt := new(testMatcher) // We don't allow more than uint64 in gas amount @@ -38,6 +36,7 @@ func TestTransaction(t *testing.T) { txt.skipLoad("^ttGasLimit/TransactionWithGasLimitxPriceOverflow.json") txt.walk(t, transactionTestDir, func(t *testing.T, name string, test *TransactionTest) { + t.Parallel() cfg := params.MainnetChainConfig if err := txt.checkFailure(t, test.Run(cfg.ChainID)); err != nil { t.Error(err) From f0af9016d836300f91c0e376b5b49fdc78fd6494 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Nov 2024 14:04:04 +0700 Subject: [PATCH 17/28] jwt dep up (#12635) dependabot recommended --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ff79c4af55e..e3aef287de0 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/go-test/deep v1.1.1 github.com/goccy/go-json v0.9.11 github.com/gofrs/flock v0.12.1 - github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang-jwt/jwt/v4 v4.5.1 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/btree v1.1.3 github.com/google/cel-go v0.18.2 diff --git a/go.sum b/go.sum index 6661c7a2b0c..021e462064e 100644 --- a/go.sum +++ b/go.sum @@ -354,8 +354,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= From 3d01204109c48481158f3065befe4f895458f32d Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Nov 2024 14:16:22 +0700 Subject: [PATCH 18/28] [rpc-test] enable `debug_traceTransaction` (#12638) --- .github/workflows/qa-rpc-integration-tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index 415d0cdf961..ecbcb404a35 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -104,7 +104,6 @@ jobs: # to investigate debug_traceBlockByHash,\ debug_traceCallMany/test_02.tar,debug_traceCallMany/test_04.tar,debug_traceCallMany/test_05.tar,debug_traceCallMany/test_06.tar,debug_traceCallMany/test_07.tar,debug_traceCallMany/test_09.json,debug_traceCallMany/test_10.tar,\ - debug_traceTransaction,\ engine_exchangeCapabilities/test_1.json,\ engine_exchangeTransitionConfigurationV1/test_01.json,\ engine_getClientVersionV1/test_1.json,\ From f948fb4a3b2487924e480f54c1e10d320d0c7ccb Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Nov 2024 15:16:33 +0700 Subject: [PATCH 19/28] `eth_getLogs` to fix `fee cap less than block base fee` error (#12640) --- .github/workflows/lint.yml | 2 +- .github/workflows/qa-rpc-integration-tests.yml | 5 ++--- cmd/state/exec3/trace_worker.go | 5 +++-- core/vm/evm.go | 5 +++++ go.mod | 2 +- turbo/jsonrpc/eth_receipts.go | 4 ---- 6 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3efd756d7e5..31cb5b4a28d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: '1.23' - name: Install golangci-lint if: runner.os == 'Linux' diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index ecbcb404a35..bc6def09f5a 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -87,6 +87,8 @@ jobs: # Run RPC integration test runner via http python3 ./run_tests.py -p 8545 --continue -f --json-diff -x \ +# Erigon2 and Erigon3 never supported this api methods + trace_rawTransaction,\ # false positives: Erigon return expected response. but rpc-test somehow doesn't see 1 field. 
erigon_getHeaderByHash,erigon_getHeaderByNumber,eth_feeHistory,\ # total difficulty field was removed, then added back @@ -109,9 +111,6 @@ jobs: engine_getClientVersionV1/test_1.json,\ erigon_getBalanceChangesInBlock,\ eth_createAccessList/test_16.json,\ - trace_filter/test_16.json,\ - trace_rawTransaction/test_01.json,\ - trace_rawTransaction/test_03.json,\ admin_nodeInfo/test_01.json,\ admin_peers/test_01.json,\ erigon_nodeInfo/test_1.json,\ diff --git a/cmd/state/exec3/trace_worker.go b/cmd/state/exec3/trace_worker.go index 7c9ceeb8e79..7b80c49992b 100644 --- a/cmd/state/exec3/trace_worker.go +++ b/cmd/state/exec3/trace_worker.go @@ -74,11 +74,12 @@ func NewTraceWorker(tx kv.TemporalTx, cc *chain.Config, engine consensus.EngineR stateReader: stateReader, tracer: tracer, evm: vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, cc, vm.Config{}), - vmConfig: &vm.Config{}, + vmConfig: &vm.Config{NoBaseFee: true}, ibs: state.New(stateReader), } if tracer != nil { - ie.vmConfig = &vm.Config{Debug: true, Tracer: tracer} + ie.vmConfig.Debug = true + ie.vmConfig.Tracer = tracer } return ie } diff --git a/core/vm/evm.go b/core/vm/evm.go index 1ec0c0ff645..fb46915fed3 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -134,6 +134,11 @@ func (evm *EVM) Reset(txCtx evmtypes.TxContext, ibs evmtypes.IntraBlockState) { } func (evm *EVM) ResetBetweenBlocks(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, ibs evmtypes.IntraBlockState, vmConfig Config, chainRules *chain.Rules) { + if vmConfig.NoBaseFee { + if txCtx.GasPrice.IsZero() { + blockCtx.BaseFee = new(uint256.Int) + } + } evm.Context = blockCtx evm.TxContext = txCtx evm.intraBlockState = ibs diff --git a/go.mod b/go.mod index e3aef287de0..be8789ea804 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,6 @@ require ( github.com/Giulio2002/bls v0.0.0-20241013174947-019133587795 github.com/Masterminds/sprig/v3 v3.2.3 github.com/RoaringBitmap/roaring v1.9.4 - github.com/alecthomas/atomic v0.1.0-alpha2 github.com/alecthomas/kong v0.8.1 github.com/anacrolix/sync v0.5.1 github.com/anacrolix/torrent v1.52.6-0.20231201115409-7ea994b6bbd8 @@ -107,6 +106,7 @@ require ( ) require ( + github.com/alecthomas/atomic v0.1.0-alpha2 // indirect github.com/elastic/go-freelru v0.13.0 // indirect github.com/erigontech/speedtest v0.0.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go index 3dbb7b5b69a..ba23694e56a 100644 --- a/turbo/jsonrpc/eth_receipts.go +++ b/turbo/jsonrpc/eth_receipts.go @@ -303,10 +303,6 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end continue } blockHash = header.Hash() - - if err != nil { - return nil, err - } exec.ChangeBlock(header) } From 821f3c1c5694d6bffec8389748f3196e7b0e9abf Mon Sep 17 00:00:00 2001 From: lystopad Date: Wed, 6 Nov 2024 09:38:52 +0100 Subject: [PATCH 20/28] Cleanup old snapshot docker images (main-xxxxxxx). 
(#12610) Keep only latest 100 docker images with tag matching pattern main-XXXXXXX --- .../ci-cd-main-branch-docker-images.yml | 35 +++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-cd-main-branch-docker-images.yml b/.github/workflows/ci-cd-main-branch-docker-images.yml index bdfc1e6a883..b476d0db4c9 100644 --- a/.github/workflows/ci-cd-main-branch-docker-images.yml +++ b/.github/workflows/ci-cd-main-branch-docker-images.yml @@ -9,6 +9,7 @@ env: CHECKOUT_REF: "main" DOCKERHUB_REPOSITORY: "erigontech/erigon" LABEL_DESCRIPTION: "[docker image built on a last commit id from the main branch] Erigon is an implementation of Ethereum (execution layer with embeddable consensus layer), on the efficiency frontier. Archive Node by default." + KEEP_IMAGES: 100 on: push: @@ -127,7 +128,7 @@ jobs: --push \ --platform linux/amd64,linux/arm64 . - - name: export and print docker build tag + - name: export and print docker build tag, cleanup old docker images id: built_tag_export env: BUILD_VERSION: "main-${{ steps.getCommitId.outputs.short_commit_id }}" @@ -136,6 +137,36 @@ jobs: echo The following docker images have been published: echo "${{ env.DOCKERHUB_REPOSITORY }}:main-${{ env.BUILD_VERSION }}" echo "${{ env.DOCKERHUB_REPOSITORY }}:main-latest" + echo + echo "Cleanup old docker images matching pattern tag ~= main-XXXXXXX" + curl_cmd="curl -s -H \"Authorization: JWT ${{ secrets.ORG_DOCKERHUB_ERIGONTECH_TOKEN }}\" " + dockerhub_url='https://hub.docker.com/v2/namespaces/erigontech/repositories/erigon' + my_list () { + # First page: + next_page="$dockerhub_url/tags?page=1&page_size=100" + while [ "$next_page" != "null" ] + do + # Print tags and push dates for tags matching "main-": + $curl_cmd $next_page | jq -r '.results|.[]|.name + " " + .tag_last_pushed' | grep 'main-' + next_page=`$curl_cmd $next_page | jq '.next' | sed -e 's/^\"//' -e 's/\"$//'` + done + } + + my_list | tail -n+${{ env.KEEP_IMAGES }} | while read line; do + echo -n "Removing docker image/published - $line " + current_image=$(echo $line | sed -e 's/^\(main-.\{7\}\) .*/\1/') + output_code=$(curl --write-out %{http_code} --output curl-output.log \ + -s -X DELETE -H "Accept: application/json" \ + -H "Authorization: JWT ${{ secrets.ORG_DOCKERHUB_ERIGONTECH_TOKEN }}" \ + https://hub.docker.com/v2/repositories/erigontech/erigon/tags/${current_image} ) + if [ $output_code -ne 204 ]; then + echo "ERROR: failed to remove docker image erigon:${current_image}" + echo "ERROR: API response: $(cat curl-output.log)." + else + echo -n " - removed. " + fi + echo "Done." 
+ done run-kurtosis-assertoor: needs: [define_matrix, Build] @@ -143,4 +174,4 @@ jobs: with: checkout_ref: ${{ github.sha }} os: ${{ needs.define_matrix.outputs.os }} - docker_build_tag: ${{ needs.Build.outputs.docker_build_tag }} \ No newline at end of file + docker_build_tag: ${{ needs.Build.outputs.docker_build_tag }} From 063147285ea282451d9098bcf14275f735042c68 Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Wed, 6 Nov 2024 10:24:27 +0000 Subject: [PATCH 21/28] Strongerrunner (#12647) Updated test names --- .github/workflows/test-erigon-is-library.yml | 2 +- .github/workflows/test-integration.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-erigon-is-library.yml b/.github/workflows/test-erigon-is-library.yml index e165dca93b7..50a20f0e224 100644 --- a/.github/workflows/test-erigon-is-library.yml +++ b/.github/workflows/test-erigon-is-library.yml @@ -1,4 +1,4 @@ -name: Integration tests +name: Library integration tests on: push: branches: diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index f47c4b0dcaa..b90dff0d477 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -1,4 +1,4 @@ -name: Integration tests +name: Erigon integration tests on: push: branches: From 82af95424f3b699de4a889428c7be5ab1c6e2e55 Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Wed, 6 Nov 2024 12:05:30 +0000 Subject: [PATCH 22/28] revert changes (#12649) Revert parallel test execution cause gitlab runner is not strong enough (https://github.com/erigontech/erigon/issues/12644) --- .github/workflows/test-erigon-is-library.yml | 2 +- .github/workflows/test-integration.yml | 5 ++--- Makefile | 2 +- tests/block_test.go | 1 - tests/exec_spec_test.go | 1 - tests/state_test.go | 1 - 6 files changed, 4 insertions(+), 8 deletions(-) diff --git a/.github/workflows/test-erigon-is-library.yml b/.github/workflows/test-erigon-is-library.yml index 50a20f0e224..e165dca93b7 100644 --- a/.github/workflows/test-erigon-is-library.yml +++ b/.github/workflows/test-erigon-is-library.yml @@ -1,4 +1,4 @@ -name: Library integration tests +name: Integration tests on: push: branches: diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index b90dff0d477..c7462a8d262 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -1,4 +1,4 @@ -name: Erigon integration tests +name: Integration tests on: push: branches: @@ -24,7 +24,6 @@ jobs: os: - ubuntu-22.04 - macos-14 - - ubuntu-latest-erigontests-large runs-on: ${{ matrix.os }} steps: @@ -38,7 +37,7 @@ jobs: run: sudo apt update && sudo apt install build-essential - name: test-integration - run: GOGC=50 make test-integration + run: make test-integration tests-windows: strategy: diff --git a/Makefile b/Makefile index 55e035f8362..aab922d9e06 100644 --- a/Makefile +++ b/Makefile @@ -65,7 +65,7 @@ GO_FLAGS += -ldflags "-X ${PACKAGE}/params.GitCommit=${GIT_COMMIT} -X ${PACKAGE} GOBUILD = ${CPU_ARCH} CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" $(GO) build $(GO_FLAGS) GO_DBG_BUILD = ${CPU_ARCH} CGO_CFLAGS="$(CGO_CFLAGS) -DMDBX_DEBUG=1" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" $(GO) build -tags $(BUILD_TAGS),debug -gcflags=all="-N -l" # see delve docs -GOTEST = ${CPU_ARCH} CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" GODEBUG=cgocheck=0 GOTRACEBACK=1 $(GO) test $(GO_FLAGS) ./... 
+GOTEST = ${CPU_ARCH} CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" GODEBUG=cgocheck=0 GOTRACEBACK=1 $(GO) test $(GO_FLAGS) ./... -p 2 default: all diff --git a/tests/block_test.go b/tests/block_test.go index 3003fd707a0..269dde4ab78 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -53,7 +53,6 @@ func TestBlockchain(t *testing.T) { checkStateRoot := true bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { - t.Parallel() // import pre accounts & construct test genesis block & state root if err := bt.checkFailure(t, test.Run(t, checkStateRoot)); err != nil { t.Error(err) diff --git a/tests/exec_spec_test.go b/tests/exec_spec_test.go index e93e660f97b..d8ea375f79a 100644 --- a/tests/exec_spec_test.go +++ b/tests/exec_spec_test.go @@ -35,7 +35,6 @@ func TestExecutionSpec(t *testing.T) { checkStateRoot := true bt.walk(t, dir, func(t *testing.T, name string, test *BlockTest) { - t.Parallel() // import pre accounts & construct test genesis block & state root if err := bt.checkFailure(t, test.Run(t, checkStateRoot)); err != nil { t.Error(err) diff --git a/tests/state_test.go b/tests/state_test.go index 7199c444aac..9b308a99a57 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -39,7 +39,6 @@ import ( ) func TestState(t *testing.T) { - t.Parallel() defer log.Root().SetHandler(log.Root().GetHandler()) log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StderrHandler)) if runtime.GOOS == "windows" { From fb6f8f42f4b7521f8f5225477a04336af8365294 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Wed, 6 Nov 2024 18:46:31 +0100 Subject: [PATCH 23/28] qa-tests: use external shell script in rpc tests (#12652) Using a shell script to start the python test is useful - to manually launch tests on debug machines and - to allow us to insert comments that would otherwise violate yaml rules into the workflow --- .../workflows/qa-rpc-integration-tests.yml | 46 +-------- .github/workflows/scripts/run_rpc_tests.sh | 98 +++++++++++++++++++ 2 files changed, 101 insertions(+), 43 deletions(-) create mode 100644 .github/workflows/scripts/run_rpc_tests.sh diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index bc6def09f5a..74cf9e0aca1 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -84,50 +84,10 @@ jobs: cd ${{ runner.workspace }}/rpc-tests/integration rm -rf ./mainnet/results/ - + # Run RPC integration test runner via http - python3 ./run_tests.py -p 8545 --continue -f --json-diff -x \ -# Erigon2 and Erigon3 never supported this api methods - trace_rawTransaction,\ -# false positives: Erigon return expected response. but rpc-test somehow doesn't see 1 field. 
- erigon_getHeaderByHash,erigon_getHeaderByNumber,eth_feeHistory,\ -# total difficulty field was removed, then added back - eth_getBlockByHash,eth_getBlockByNumber,\ -# Erigon bugs - debug_accountRange,debug_storageRangeAt,\ -# need update rpc-test - because Erigon is correct (@AskAlexSharov will do after https://github.com/erigontech/erigon/pull/12634) - debug_getModifiedAccountsByHash,debug_getModifiedAccountsByNumber,\ -# Erigon bug https://github.com/erigontech/erigon/issues/12603 - erigon_getLatestLogs,erigon_getLogsByHash/test_04.json,\ -# Erigon bug https://github.com/erigontech/erigon/issues/12637 - debug_traceBlockByNumber/test_05.tar,debug_traceBlockByNumber/test_08.tar,debug_traceBlockByNumber/test_09.tar,debug_traceBlockByNumber/test_10.tar,debug_traceBlockByNumber/test_11.tar,debug_traceBlockByNumber/test_12.tar,\ -# remove this line after https://github.com/erigontech/rpc-tests/pull/281 - parity_getBlockReceipts,\ -# to investigate - debug_traceBlockByHash,\ - debug_traceCallMany/test_02.tar,debug_traceCallMany/test_04.tar,debug_traceCallMany/test_05.tar,debug_traceCallMany/test_06.tar,debug_traceCallMany/test_07.tar,debug_traceCallMany/test_09.json,debug_traceCallMany/test_10.tar,\ - engine_exchangeCapabilities/test_1.json,\ - engine_exchangeTransitionConfigurationV1/test_01.json,\ - engine_getClientVersionV1/test_1.json,\ - erigon_getBalanceChangesInBlock,\ - eth_createAccessList/test_16.json,\ - admin_nodeInfo/test_01.json,\ - admin_peers/test_01.json,\ - erigon_nodeInfo/test_1.json,\ - eth_coinbase/test_01.json,\ - eth_getTransactionByHash/test_02.json,\ - eth_getWork/test_01.json,\ - eth_mining/test_01.json,\ - eth_protocolVersion/test_1.json,\ - eth_submitHashrate/test_1.json,\ - eth_submitWork/test_1.json,\ - net_peerCount/test_1.json,\ - net_version/test_1.json,\ - txpool_content/test_01.json,\ - txpool_status/test_1.json,\ - web3_clientVersion/test_1.json,\ - eth_estimateGas/test_14.json,\ - trace_replayBlockTransactions/test_29.tar + chmod +x ${{ runner.workspace }}/erigon/.github/workflows/scripts/run_rpc_tests.sh + ${{ runner.workspace }}/erigon/.github/workflows/scripts/run_rpc_tests.sh # Capture test runner script exit status test_exit_status=$? diff --git a/.github/workflows/scripts/run_rpc_tests.sh b/.github/workflows/scripts/run_rpc_tests.sh new file mode 100644 index 00000000000..0f94f68c039 --- /dev/null +++ b/.github/workflows/scripts/run_rpc_tests.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +set +e # Disable exit on error + +# Array of disabled tests +disabled_tests=( + # Erigon2 and Erigon3 never supported this api methods + trace_rawTransaction + # false positives: Erigon return expected response. but rpc-test somehow doesn't see 1 field. 
+ erigon_getHeaderByHash,erigon_getHeaderByNumber,eth_feeHistory + # total difficulty field was removed, then added back + eth_getBlockByHash,eth_getBlockByNumber + # Erigon bugs + debug_accountRange,debug_storageRangeAt + # need update rpc-test - because Erigon is correct (@AskAlexSharov will do after https://github.com/erigontech/erigon/pull/12634) + debug_getModifiedAccountsByHash,debug_getModifiedAccountsByNumber + # Erigon bug https://github.com/erigontech/erigon/issues/12603 + erigon_getLatestLogs,erigon_getLogsByHash/test_04.json + # Erigon bug https://github.com/erigontech/erigon/issues/12637 + debug_traceBlockByNumber/test_05.tar + debug_traceBlockByNumber/test_08.tar + debug_traceBlockByNumber/test_09.tar + debug_traceBlockByNumber/test_10.tar + debug_traceBlockByNumber/test_11.tar + debug_traceBlockByNumber/test_12.tar + # remove this line after https://github.com/erigontech/rpc-tests/pull/281 + parity_getBlockReceipts + # to investigate + debug_traceBlockByHash + debug_traceCallMany/test_02.tar + debug_traceCallMany/test_04.tar + debug_traceCallMany/test_05.tar + debug_traceCallMany/test_06.tar + debug_traceCallMany/test_07.tar + debug_traceCallMany/test_09.json + debug_traceCallMany/test_10.tar + engine_exchangeCapabilities/test_1.json + engine_exchangeTransitionConfigurationV1/test_01.json + engine_getClientVersionV1/test_1.json + erigon_getBalanceChangesInBlock + eth_createAccessList/test_16.json + admin_nodeInfo/test_01.json + admin_peers/test_01.json + erigon_nodeInfo/test_1.json + eth_coinbase/test_01.json + eth_getTransactionByHash/test_02.json + eth_getWork/test_01.json + eth_mining/test_01.json + eth_protocolVersion/test_1.json + eth_submitHashrate/test_1.json + eth_submitWork/test_1.json + net_peerCount/test_1.json + net_version/test_1.json + txpool_content/test_01.json + txpool_status/test_1.json + web3_clientVersion/test_1.json + eth_estimateGas/test_14.json + trace_replayBlockTransactions/test_29.tar + # recently started to fail + debug_traceTransaction/test_20.json + debug_traceTransaction/test_21.json + debug_traceTransaction/test_22.json + debug_traceTransaction/test_25.json + debug_traceTransaction/test_30.tar + debug_traceTransaction/test_33.json + debug_traceTransaction/test_35.tar + debug_traceTransaction/test_36.json + debug_traceTransaction/test_37.tar + debug_traceTransaction/test_38.tar + debug_traceTransaction/test_43.json + debug_traceTransaction/test_44.json + debug_traceTransaction/test_62.json + debug_traceTransaction/test_64.json + debug_traceTransaction/test_74.tar + debug_traceTransaction/test_75.tar + debug_traceTransaction/test_77.json + debug_traceTransaction/test_78.tar + debug_traceTransaction/test_79.tar + debug_traceTransaction/test_80.tar + debug_traceTransaction/test_81.tar + debug_traceTransaction/test_82.tar + debug_traceTransaction/test_83.tar + debug_traceTransaction/test_84.tar + debug_traceTransaction/test_85.tar + debug_traceTransaction/test_87.json + debug_traceTransaction/test_90.tar + debug_traceTransaction/test_91.tar + debug_traceTransaction/test_92.tar + debug_traceTransaction/test_93.json + debug_traceTransaction/test_96.json + trace_filter/test_16.json) + +# Transform the array into a comma-separated string +disabled_test_list=$(IFS=,; echo "${disabled_tests[*]}") + +python3 ./run_tests.py -p 8545 --continue -f --json-diff -x "$disabled_test_list" + +exit $? 
\ No newline at end of file From 4846740964d91316a2c01ead66bea2446718342b Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com> Date: Thu, 7 Nov 2024 08:26:00 +0100 Subject: [PATCH 24/28] rpcdaemon: fix on FeeHistory check on GetMaxBlobGasPerBlock() method of chainConfig (#12655) To read MaxBlobGasPerBlock it is necessary use the method GetMaxBlobGasPerBlock() that returns MAX_BLOB_GAS_PER_BLOCK (EIP-4844) if not override in config with custom value MaxBlobGasPerBlock --- eth/gasprice/feehistory.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 6fa642d7e58..114bf931f39 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -115,14 +115,16 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { bf.nextBlobBaseFee = new(big.Int) } bf.gasUsedRatio = float64(bf.header.GasUsed) / float64(bf.header.GasLimit) + + if blobGasUsed := bf.header.BlobGasUsed; blobGasUsed != nil && chainconfig.GetMaxBlobGasPerBlock() != 0 { + bf.blobGasUsedRatio = float64(*blobGasUsed) / float64(chainconfig.GetMaxBlobGasPerBlock()) + } + if len(percentiles) == 0 { // rewards were not requested, return null return } - if blobGasUsed := bf.header.BlobGasUsed; blobGasUsed != nil && chainconfig.MaxBlobGasPerBlock != nil { - bf.blobGasUsedRatio = float64(*blobGasUsed) / float64(*chainconfig.MaxBlobGasPerBlock) - } if bf.block == nil || (bf.receipts == nil && len(bf.block.Transactions()) != 0) { oracle.log.Error("Block or receipts are missing while reward percentiles are requested") return From cc86623f5cb80b7059914bccb74521cad508521b Mon Sep 17 00:00:00 2001 From: Sixtysixter <20945591+Sixtysixter@users.noreply.github.com> Date: Thu, 7 Nov 2024 08:26:26 +0100 Subject: [PATCH 25/28] PageToken ad PageSize in IndexRange and DomainRange functions (#12657) Fixed PageToken management in IndexRange and DomainRange functions Fixed PageSize management in remote db server --- erigon-lib/kv/remotedb/kv_remote.go | 6 ++-- .../kv/remotedbserver/remotedbserver.go | 32 ++++++++----------- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/erigon-lib/kv/remotedb/kv_remote.go b/erigon-lib/kv/remotedb/kv_remote.go index c06022bb728..b6e7330ba1a 100644 --- a/erigon-lib/kv/remotedb/kv_remote.go +++ b/erigon-lib/kv/remotedb/kv_remote.go @@ -643,7 +643,7 @@ func (tx *tx) DomainGet(name kv.Domain, k, k2 []byte) (v []byte, step uint64, er func (tx *tx) DomainRange(name kv.Domain, fromKey, toKey []byte, ts uint64, asc order.By, limit int) (it stream.KV, err error) { return stream.PaginateKV(func(pageToken string) (keys, vals [][]byte, nextPageToken string, err error) { - reply, err := tx.db.remoteKV.DomainRange(tx.ctx, &remote.DomainRangeReq{TxId: tx.id, Table: name.String(), FromKey: fromKey, ToKey: toKey, Ts: ts, OrderAscend: bool(asc), Limit: int64(limit)}) + reply, err := tx.db.remoteKV.DomainRange(tx.ctx, &remote.DomainRangeReq{TxId: tx.id, Table: name.String(), FromKey: fromKey, ToKey: toKey, Ts: ts, OrderAscend: bool(asc), Limit: int64(limit), PageToken: pageToken}) if err != nil { return nil, nil, "", err } @@ -659,7 +659,7 @@ func (tx *tx) HistorySeek(name kv.History, k []byte, ts uint64) (v []byte, ok bo } func (tx *tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int) (it stream.KV, err error) { return stream.PaginateKV(func(pageToken string) (keys, vals [][]byte, nextPageToken string, err error) { - reply, err := 
tx.db.remoteKV.HistoryRange(tx.ctx, &remote.HistoryRangeReq{TxId: tx.id, Table: string(name), FromTs: int64(fromTs), ToTs: int64(toTs), OrderAscend: bool(asc), Limit: int64(limit)}) + reply, err := tx.db.remoteKV.HistoryRange(tx.ctx, &remote.HistoryRangeReq{TxId: tx.id, Table: string(name), FromTs: int64(fromTs), ToTs: int64(toTs), OrderAscend: bool(asc), Limit: int64(limit), PageToken: pageToken}) if err != nil { return nil, nil, "", err } @@ -669,7 +669,7 @@ func (tx *tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limi func (tx *tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps stream.U64, err error) { return stream.PaginateU64(func(pageToken string) (arr []uint64, nextPageToken string, err error) { - req := &remote.IndexRangeReq{TxId: tx.id, Table: string(name), K: k, FromTs: int64(fromTs), ToTs: int64(toTs), OrderAscend: bool(asc), Limit: int64(limit)} + req := &remote.IndexRangeReq{TxId: tx.id, Table: string(name), K: k, FromTs: int64(fromTs), ToTs: int64(toTs), OrderAscend: bool(asc), Limit: int64(limit), PageToken: pageToken} reply, err := tx.db.remoteKV.IndexRange(tx.ctx, req) if err != nil { return nil, "", err diff --git a/erigon-lib/kv/remotedbserver/remotedbserver.go b/erigon-lib/kv/remotedbserver/remotedbserver.go index 1450ccbb1b5..a4d4b493945 100644 --- a/erigon-lib/kv/remotedbserver/remotedbserver.go +++ b/erigon-lib/kv/remotedbserver/remotedbserver.go @@ -605,15 +605,13 @@ func (s *KvServer) IndexRange(_ context.Context, req *remote.IndexRangeReq) (*re } reply.Timestamps = append(reply.Timestamps, v) limit-- - } - if len(reply.Timestamps) == int(req.PageSize) && it.HasNext() { - next, err := it.Next() - if err != nil { - return err - } - reply.NextPageToken, err = marshalPagination(&remote.IndexPagination{NextTimeStamp: int64(next), Limit: int64(limit)}) - if err != nil { - return err + + if len(reply.Timestamps) == int(req.PageSize) && it.HasNext() { + reply.NextPageToken, err = marshalPagination(&remote.IndexPagination{NextTimeStamp: int64(v), Limit: int64(limit)}) + if err != nil { + return err + } + break } } return nil @@ -691,15 +689,13 @@ func (s *KvServer) DomainRange(_ context.Context, req *remote.DomainRangeReq) (* reply.Keys = append(reply.Keys, key) reply.Values = append(reply.Values, value) limit-- - } - if len(reply.Keys) == int(req.PageSize) && it.HasNext() { - nextK, _, err := it.Next() - if err != nil { - return err - } - reply.NextPageToken, err = marshalPagination(&remote.PairsPagination{NextKey: nextK, Limit: int64(limit)}) - if err != nil { - return err + + if len(reply.Keys) == int(req.PageSize) && it.HasNext() { + reply.NextPageToken, err = marshalPagination(&remote.PairsPagination{NextKey: k, Limit: int64(limit)}) + if err != nil { + return err + } + break } } return nil From 6975dcf73820728cc8ccf8cad95fd23ef641abe3 Mon Sep 17 00:00:00 2001 From: Michele Modolo <70838029+michelemodolo@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:50:01 +0700 Subject: [PATCH 26/28] Add test-integration-erigon.yml (#12660) This PR adds test-integration-erigon.yml, which takes over test-integration.yml --------- Co-authored-by: Michele Modolo --- .github/workflows/test-integration-erigon.yml | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 .github/workflows/test-integration-erigon.yml diff --git a/.github/workflows/test-integration-erigon.yml b/.github/workflows/test-integration-erigon.yml new file mode 100644 index 00000000000..ec25c2026cb --- /dev/null +++ 
b/.github/workflows/test-integration-erigon.yml @@ -0,0 +1,77 @@ +name: Integration tests - ERIGON +on: + push: + branches: + - test-integration-michele + - main + - 'release/**' + pull_request: + branches: + - main + types: + - opened + - reopened + - synchronize + - ready_for_review + schedule: + - cron: '20 16 * * *' # daily at 16:20 UTC + workflow_dispatch: + +jobs: + tests-mac-linux: + strategy: + matrix: + # list of os: https://github.com/actions/virtual-environments + os: + - ubuntu-22.04 + - macos-14 + - ubuntu-latest-erigontests-large + runs-on: ${{ matrix.os }} + + steps: + - name: declaring runners + run: | + set +x + echo "I am being served by this runner: $RUNNER_NAME" + - uses: actions/checkout@v4 + - run: git submodule update --init --recursive --force + - uses: actions/setup-go@v5 + with: + go-version: '1.23' + cache: ${{ contains(fromJSON('["refs/heads/main","refs/heads/release/2.60","refs/heads/release/2.61"]'), github.ref) }} + - name: Install dependencies on Linux + if: runner.os == 'Linux' + run: sudo apt update && sudo apt install build-essential + - name: test-integration + run: make test-integration + + tests-windows: + strategy: + matrix: + os: [ windows-2022 ] + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v4 + - run: git submodule update --init --recursive --force + - uses: actions/setup-go@v5 + with: + go-version: '1.23' + + - uses: actions/cache@v4 + with: + path: | + C:\ProgramData\chocolatey\lib\mingw + C:\ProgramData\chocolatey\lib\cmake + key: chocolatey-${{ matrix.os }} + - name: Install dependencies + run: | + choco upgrade mingw -y --no-progress --version 13.2.0 + choco install cmake -y --no-progress --version 3.27.8 + + - name: test-integration + run: .\wmake.ps1 test-integration + + + + \ No newline at end of file From b996e91ee66188a3641c891d62ec33614973633e Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 7 Nov 2024 10:41:08 +0100 Subject: [PATCH 27/28] Caplin: View-Head (#12584) Yet - another huge PR :(. the View-Head design is to share one beacon state across rpc and the head notifier. it allows for a more controlled and less-deadlock prone API. it is based on `mdbx.View`, it also comes with a deadlock detector. 
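
For reviewers, a minimal sketch of the calling convention this introduces (the helper below is purely illustrative and not part of the diff): instead of borrowing the head state via `HeadState()` plus a cancel function and nil-checking it at every call site, callers now hand a closure to `ViewHeadState`, so the state is only accessible for the duration of the callback and the "no head state yet" case surfaces as an error.

```go
package example // illustrative only

import (
	"github.com/erigontech/erigon/cl/beacon/synced_data"
	"github.com/erigontech/erigon/cl/phase1/core/state"
)

// headEpoch is a hypothetical caller: it reads a single value out of the head state.
// The beacon state is only borrowed inside the callback and must not escape it;
// the returned error covers the case where the node has no head state yet.
func headEpoch(sd synced_data.SyncedData) (uint64, error) {
	var epoch uint64
	err := sd.ViewHeadState(func(s *state.CachingBeaconState) error {
		epoch = state.Epoch(s)
		return nil
	})
	return epoch, err
}
```

Scoping every read to a callback is presumably what makes the API less deadlock-prone: the borrowed state cannot outlive the callback, so a handler can no longer keep holding the head state while blocking on something else, and the deadlock detector has a well-defined critical section to watch.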
--- cl/beacon/handler/block_production.go | 42 ++-- cl/beacon/handler/builder.go | 16 +- cl/beacon/handler/committees.go | 52 ++--- cl/beacon/handler/data_test.go | 7 +- cl/beacon/handler/duties_attester.go | 121 ++++++------ cl/beacon/handler/duties_proposer.go | 104 +++++----- cl/beacon/handler/pool.go | 27 ++- cl/beacon/handler/pool_test.go | 28 ++- cl/beacon/handler/subscription.go | 23 ++- cl/beacon/handler/utils_test.go | 2 +- cl/beacon/handler/validators.go | 41 ++-- cl/beacon/synced_data/interface.go | 7 +- .../mock_services/synced_data_mock.go | 155 +++++++++------ cl/beacon/synced_data/synced_data.go | 76 ++++---- cl/monitor/validator.go | 21 +- cl/phase1/core/state/accessors.go | 4 +- cl/phase1/core/state/cache.go | 4 +- cl/phase1/core/state/cache_accessors.go | 8 +- cl/phase1/forkchoice/fork_choice_test.go | 4 +- cl/phase1/forkchoice/forkchoice.go | 10 - cl/phase1/forkchoice/interface.go | 1 - .../mock_services/forkchoice_mock.go | 7 - cl/phase1/forkchoice/on_attestation.go | 12 +- cl/phase1/forkchoice/on_attester_slashing.go | 31 +-- cl/phase1/network/gossip_manager.go | 3 +- .../services/aggregate_and_proof_service.go | 183 ++++++++++-------- .../aggregate_and_proof_service_test.go | 15 +- .../network/services/attestation_service.go | 142 +++++++------- .../services/attestation_service_test.go | 33 +--- .../network/services/blob_sidecar_service.go | 116 +++++------ .../services/blob_sidecar_service_test.go | 2 +- cl/phase1/network/services/block_service.go | 40 ++-- .../network/services/block_service_test.go | 2 +- .../bls_to_execution_change_service.go | 42 ++-- .../bls_to_execution_change_service_test.go | 108 +++++------ .../services/proposer_slashing_service.go | 62 +++--- .../proposer_slashing_service_test.go | 75 ++++--- .../sync_committee_messages_service.go | 69 +++---- .../sync_committee_messages_service_test.go | 2 +- .../services/sync_contribution_service.go | 120 ++++++------ .../sync_contribution_service_test.go | 2 +- .../services/voluntary_exit_service.go | 107 +++++----- .../services/voluntary_exit_service_test.go | 80 ++++---- cl/phase1/stages/forkchoice.go | 78 +++++--- cl/spectest/consensus_tests/fork_choice.go | 2 +- .../finalization_and_justification.go | 4 +- .../process_rewards_and_penalties.go | 4 +- cl/utils/threading/parallel_executor.go | 74 +++++++ cl/utils/threading/worker_pool.go | 79 -------- .../committee_subscription.go | 7 +- cmd/caplin/caplin1/run.go | 2 +- erigon-lib/common/dbg/experiments.go | 2 + 52 files changed, 1188 insertions(+), 1070 deletions(-) create mode 100644 cl/utils/threading/parallel_executor.go delete mode 100644 cl/utils/threading/worker_pool.go diff --git a/cl/beacon/handler/block_production.go b/cl/beacon/handler/block_production.go index adb61d0480b..494e12e9f41 100644 --- a/cl/beacon/handler/block_production.go +++ b/cl/beacon/handler/block_production.go @@ -153,30 +153,26 @@ func (a *ApiHandler) GetEthV1ValidatorAttestationData( committeeIndex = &zero } - headState, cn := a.syncedData.HeadState() - defer cn() - - if headState == nil { - return nil, beaconhttp.NewEndpointError( - http.StatusServiceUnavailable, - errors.New("beacon node is still syncing"), - ) - } - - attestationData, err := a.attestationProducer.ProduceAndCacheAttestationData( - tx, - headState, - a.syncedData.HeadRoot(), - *slot, - *committeeIndex, - ) - if err == attestation_producer.ErrHeadStateBehind { - return nil, beaconhttp.NewEndpointError( - http.StatusServiceUnavailable, - errors.New("beacon node is still syncing"), + var attestationData 
solid.AttestationData + if err := a.syncedData.ViewHeadState(func(headState *state.CachingBeaconState) error { + attestationData, err = a.attestationProducer.ProduceAndCacheAttestationData( + tx, + headState, + a.syncedData.HeadRoot(), + *slot, + *committeeIndex, ) - } else if err != nil { - return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, err) + if err == attestation_producer.ErrHeadStateBehind { + return beaconhttp.NewEndpointError( + http.StatusServiceUnavailable, + synced_data.ErrNotSynced, + ) + } else if err != nil { + return beaconhttp.NewEndpointError(http.StatusInternalServerError, err) + } + return nil + }); err != nil { + return nil, err } return newBeaconResponse(attestationData), nil diff --git a/cl/beacon/handler/builder.go b/cl/beacon/handler/builder.go index d323eec028e..46102a9ffce 100644 --- a/cl/beacon/handler/builder.go +++ b/cl/beacon/handler/builder.go @@ -67,15 +67,15 @@ func (a *ApiHandler) GetEth1V1BuilderStatesExpectedWithdrawals(w http.ResponseWr return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("beacon node is syncing")) } if root == headRoot { - headState, cn := a.syncedData.HeadState() - defer cn() - if headState == nil { - return nil, beaconhttp.NewEndpointError( - http.StatusServiceUnavailable, - errors.New("node is syncing"), - ) + var expectedWithdrawals []*cltypes.Withdrawal + + if err := a.syncedData.ViewHeadState(func(headState *state.CachingBeaconState) error { + expectedWithdrawals = state.ExpectedWithdrawals(headState, state.Epoch(headState)) + return nil + }); err != nil { + return nil, err } - return newBeaconResponse(state.ExpectedWithdrawals(headState, state.Epoch(headState))).WithFinalized(false), nil + return newBeaconResponse(expectedWithdrawals).WithFinalized(false), nil } lookAhead := 1024 for currSlot := *slot + 1; currSlot < *slot+uint64(lookAhead); currSlot++ { diff --git a/cl/beacon/handler/committees.go b/cl/beacon/handler/committees.go index 9f4d25b022a..40e2dccf5c1 100644 --- a/cl/beacon/handler/committees.go +++ b/cl/beacon/handler/committees.go @@ -17,7 +17,6 @@ package handler import ( - "errors" "fmt" "net/http" "strconv" @@ -86,39 +85,42 @@ func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*bea } resp := make([]*committeeResponse, 0, a.beaconChainCfg.SlotsPerEpoch*a.beaconChainCfg.MaxCommitteesPerSlot) isFinalized := slot <= a.forkchoiceStore.FinalizedSlot() - s, cn := a.syncedData.HeadState() - defer cn() + // s, cn := a.syncedData.HeadState() + // defer cn() if a.forkchoiceStore.LowestAvailableSlot() <= slot { // non-finality case - if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing")) - } - if epoch > state.Epoch(s)+1 { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch)) - } - // get active validator indicies - committeeCount := s.CommitteeCount(epoch) - // now start obtaining the committees from the head state - for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { - if slotFilter != nil && currSlot != *slotFilter { - continue + if err := a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error { + if epoch > state.Epoch(s)+1 { + return beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch)) } - for committeeIndex := uint64(0); committeeIndex < committeeCount; committeeIndex++ { - if index 
!= nil && committeeIndex != *index { + // get active validator indicies + committeeCount := s.CommitteeCount(epoch) + // now start obtaining the committees from the head state + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + if slotFilter != nil && currSlot != *slotFilter { continue } - data := &committeeResponse{Index: committeeIndex, Slot: currSlot} - idxs, err := s.GetBeaconCommitee(currSlot, committeeIndex) - if err != nil { - return nil, err + for committeeIndex := uint64(0); committeeIndex < committeeCount; committeeIndex++ { + if index != nil && committeeIndex != *index { + continue + } + data := &committeeResponse{Index: committeeIndex, Slot: currSlot} + idxs, err := s.GetBeaconCommitee(currSlot, committeeIndex) + if err != nil { + return err + } + for _, idx := range idxs { + data.Validators = append(data.Validators, strconv.FormatUint(idx, 10)) + } + resp = append(resp, data) } - for _, idx := range idxs { - data.Validators = append(data.Validators, strconv.FormatUint(idx, 10)) - } - resp = append(resp, data) } + return nil + }); err != nil { + return nil, err } + return newBeaconResponse(resp).WithFinalized(isFinalized).WithOptimistic(isOptimistic), nil } // finality case diff --git a/cl/beacon/handler/data_test.go b/cl/beacon/handler/data_test.go index 1fd236441dd..bbeba27a791 100644 --- a/cl/beacon/handler/data_test.go +++ b/cl/beacon/handler/data_test.go @@ -103,9 +103,10 @@ func defaultHarnessOpts(c harnessConfig) []beacontest.HarnessOption { sm.OnHeadState(postState) var s *state.CachingBeaconState for s == nil { - var cn func() - s, cn = sm.HeadState() - cn() + sm.ViewHeadState(func(headState *state.CachingBeaconState) error { + s = headState + return nil + }) } s.SetSlot(789274827847783) diff --git a/cl/beacon/handler/duties_attester.go b/cl/beacon/handler/duties_attester.go index 5085975a128..5c8ff7b1e75 100644 --- a/cl/beacon/handler/duties_attester.go +++ b/cl/beacon/handler/duties_attester.go @@ -18,7 +18,6 @@ package handler import ( "encoding/json" - "errors" "fmt" "net/http" "strconv" @@ -39,22 +38,28 @@ type attesterDutyResponse struct { Slot uint64 `json:"slot,string"` } -func (a *ApiHandler) getDependentRoot(s *state.CachingBeaconState, epoch uint64) libcommon.Hash { - dependentRootSlot := ((epoch - 1) * a.beaconChainCfg.SlotsPerEpoch) - 3 - maxIterations := 2048 - for i := 0; i < maxIterations; i++ { - if dependentRootSlot > epoch*a.beaconChainCfg.SlotsPerEpoch { - return libcommon.Hash{} - } +func (a *ApiHandler) getDependentRoot(epoch uint64) (libcommon.Hash, error) { + var ( + dependentRoot libcommon.Hash + err error + ) + return dependentRoot, a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error { + dependentRootSlot := ((epoch - 1) * a.beaconChainCfg.SlotsPerEpoch) - 3 + maxIterations := 2048 + for i := 0; i < maxIterations; i++ { + if dependentRootSlot > epoch*a.beaconChainCfg.SlotsPerEpoch { + return nil + } - dependentRoot, err := s.GetBlockRootAtSlot(dependentRootSlot) - if err != nil { - dependentRootSlot-- - continue + dependentRoot, err = s.GetBlockRootAtSlot(dependentRootSlot) + if err != nil { + dependentRootSlot-- + continue + } + return nil } - return dependentRoot - } - return libcommon.Hash{} + return nil + }) } func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { @@ -62,12 +67,11 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) ( if err != nil { return nil, 
err } - s, cn := a.syncedData.HeadState() - defer cn() - if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing")) + + dependentRoot, err := a.getDependentRoot(epoch) + if err != nil { + return nil, err } - dependentRoot := a.getDependentRoot(s, epoch) var idxsStr []string if err := json.NewDecoder(r.Body).Decode(&idxsStr); err != nil { @@ -90,59 +94,60 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) ( idxSet[int(idx)] = struct{}{} } - tx, err := a.indiciesDB.BeginRo(r.Context()) - if err != nil { - return nil, err - } - defer tx.Rollback() - resp := []attesterDutyResponse{} // get the duties if a.forkchoiceStore.LowestAvailableSlot() <= epoch*a.beaconChainCfg.SlotsPerEpoch { // non-finality case + if err := a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error { + if epoch > state.Epoch(s)+3 { + return beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch)) + } - if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing")) - } - - if epoch > state.Epoch(s)+3 { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch)) - } - - // get active validator indicies - committeeCount := s.CommitteeCount(epoch) - // now start obtaining the committees from the head state - for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { - for committeeIndex := uint64(0); committeeIndex < committeeCount; committeeIndex++ { - idxs, err := s.GetBeaconCommitee(currSlot, committeeIndex) - if err != nil { - return nil, err - } - for vIdx, idx := range idxs { - if _, ok := idxSet[int(idx)]; !ok { - continue - } - publicKey, err := s.ValidatorPublicKey(int(idx)) + // get active validator indicies + committeeCount := s.CommitteeCount(epoch) + // now start obtaining the committees from the head state + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + for committeeIndex := uint64(0); committeeIndex < committeeCount; committeeIndex++ { + idxs, err := s.GetBeaconCommitee(currSlot, committeeIndex) if err != nil { - return nil, err + return err } - duty := attesterDutyResponse{ - Pubkey: publicKey, - ValidatorIndex: idx, - CommitteeIndex: committeeIndex, - CommitteeLength: uint64(len(idxs)), - ValidatorCommitteeIndex: uint64(vIdx), - CommitteesAtSlot: committeeCount, - Slot: currSlot, + for vIdx, idx := range idxs { + if _, ok := idxSet[int(idx)]; !ok { + continue + } + publicKey, err := s.ValidatorPublicKey(int(idx)) + if err != nil { + return err + } + duty := attesterDutyResponse{ + Pubkey: publicKey, + ValidatorIndex: idx, + CommitteeIndex: committeeIndex, + CommitteeLength: uint64(len(idxs)), + ValidatorCommitteeIndex: uint64(vIdx), + CommitteesAtSlot: committeeCount, + Slot: currSlot, + } + resp = append(resp, duty) } - resp = append(resp, duty) } } + return nil + }); err != nil { + return nil, err } + return newBeaconResponse(resp).WithOptimistic(a.forkchoiceStore.IsHeadOptimistic()).With("dependent_root", dependentRoot), nil } + tx, err := a.indiciesDB.BeginRo(r.Context()) + if err != nil { + return nil, err + } + defer tx.Rollback() + stageStateProgress, err := state_accessors.GetStateProcessingProgress(tx) if err != nil { return nil, err diff --git a/cl/beacon/handler/duties_proposer.go 
b/cl/beacon/handler/duties_proposer.go index 342cf68eb0f..7640cd416eb 100644 --- a/cl/beacon/handler/duties_proposer.go +++ b/cl/beacon/handler/duties_proposer.go @@ -26,6 +26,7 @@ import ( "github.com/erigontech/erigon/cl/beacon/beaconhttp" "github.com/erigontech/erigon/cl/persistence/base_encoding" state_accessors "github.com/erigontech/erigon/cl/persistence/state" + "github.com/erigontech/erigon/cl/phase1/core/state" shuffling2 "github.com/erigontech/erigon/cl/phase1/core/state/shuffling" libcommon "github.com/erigontech/erigon-lib/common" @@ -43,12 +44,12 @@ func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) ( if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err) } - s, cn := a.syncedData.HeadState() - defer cn() - if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing")) + + dependentRoot, err := a.getDependentRoot(epoch) + if err != nil { + return nil, err } - dependentRoot := a.getDependentRoot(s, epoch) + if epoch < a.forkchoiceStore.FinalizedCheckpoint().Epoch { tx, err := a.indiciesDB.BeginRo(r.Context()) if err != nil { @@ -89,51 +90,56 @@ func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) ( duties := make([]proposerDuties, a.beaconChainCfg.SlotsPerEpoch) wg := sync.WaitGroup{} - for slot := expectedSlot; slot < expectedSlot+a.beaconChainCfg.SlotsPerEpoch; slot++ { - // Lets do proposer index computation - mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % - a.beaconChainCfg.EpochsPerHistoricalVector - // Input for the seed hash. - mix := s.GetRandaoMix(int(mixPosition)) - input := shuffling2.GetSeed(a.beaconChainCfg, mix, epoch, a.beaconChainCfg.DomainBeaconProposer) - slotByteArray := make([]byte, 8) - binary.LittleEndian.PutUint64(slotByteArray, slot) - - // Add slot to the end of the input. - inputWithSlot := append(input[:], slotByteArray...) - hash := sha256.New() - - // Calculate the hash. - hash.Write(inputWithSlot) - seed := hash.Sum(nil) - - indices := s.GetActiveValidatorsIndices(epoch) - - // Write the seed to an array. - seedArray := [32]byte{} - copy(seedArray[:], seed) - wg.Add(1) - - // Do it in parallel - go func(i, slot uint64, indicies []uint64, seedArray [32]byte) { - defer wg.Done() - proposerIndex, err := shuffling2.ComputeProposerIndex(s.BeaconState, indices, seedArray) - if err != nil { - panic(err) - } - var pk libcommon.Bytes48 - pk, err = s.ValidatorPublicKey(int(proposerIndex)) - if err != nil { - panic(err) - } - duties[i] = proposerDuties{ - Pubkey: pk, - ValidatorIndex: proposerIndex, - Slot: slot, - } - }(slot-expectedSlot, slot, indices, seedArray) + if err := a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error { + for slot := expectedSlot; slot < expectedSlot+a.beaconChainCfg.SlotsPerEpoch; slot++ { + // Lets do proposer index computation + mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % + a.beaconChainCfg.EpochsPerHistoricalVector + // Input for the seed hash. + mix := s.GetRandaoMix(int(mixPosition)) + input := shuffling2.GetSeed(a.beaconChainCfg, mix, epoch, a.beaconChainCfg.DomainBeaconProposer) + slotByteArray := make([]byte, 8) + binary.LittleEndian.PutUint64(slotByteArray, slot) + + // Add slot to the end of the input. + inputWithSlot := append(input[:], slotByteArray...) + hash := sha256.New() + + // Calculate the hash. 
+ hash.Write(inputWithSlot) + seed := hash.Sum(nil) + + indices := s.GetActiveValidatorsIndices(epoch) + + // Write the seed to an array. + seedArray := [32]byte{} + copy(seedArray[:], seed) + wg.Add(1) + + // Do it in parallel + go func(i, slot uint64, indicies []uint64, seedArray [32]byte) { + defer wg.Done() + proposerIndex, err := shuffling2.ComputeProposerIndex(s.BeaconState, indices, seedArray) + if err != nil { + panic(err) + } + var pk libcommon.Bytes48 + pk, err = s.ValidatorPublicKey(int(proposerIndex)) + if err != nil { + panic(err) + } + duties[i] = proposerDuties{ + Pubkey: pk, + ValidatorIndex: proposerIndex, + Slot: slot, + } + }(slot-expectedSlot, slot, indices, seedArray) + } + wg.Wait() + return nil + }); err != nil { + return nil, err } - wg.Wait() return newBeaconResponse(duties).WithFinalized(false).WithVersion(a.beaconChainCfg.GetCurrentStateVersion(epoch)).With("dependent_root", dependentRoot), nil } diff --git a/cl/beacon/handler/pool.go b/cl/beacon/handler/pool.go index a6605267a56..bd076f7493b 100644 --- a/cl/beacon/handler/pool.go +++ b/cl/beacon/handler/pool.go @@ -29,6 +29,7 @@ import ( "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/gossip" + "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/phase1/network/services" "github.com/erigontech/erigon/cl/phase1/network/subnets" ) @@ -95,9 +96,7 @@ func (a *ApiHandler) PostEthV1BeaconPoolAttestations(w http.ResponseWriter, r *h failures := []poolingFailure{} for i, attestation := range req { - headState, cn := a.syncedData.HeadState() - defer cn() - if headState == nil { + if a.syncedData.Syncing() { beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("head state not available")).WriteTo(w) return } @@ -106,10 +105,9 @@ func (a *ApiHandler) PostEthV1BeaconPoolAttestations(w http.ResponseWriter, r *h epoch = a.ethClock.GetEpochAtSlot(slot) attClVersion = a.beaconChainCfg.GetCurrentStateVersion(epoch) cIndex = attestation.Data.CommitteeIndex - committeeCountPerSlot = headState.CommitteeCount(slot / a.beaconChainCfg.SlotsPerEpoch) + committeeCountPerSlot = a.syncedData.CommitteeCount(slot / a.beaconChainCfg.SlotsPerEpoch) ) - cn() if attClVersion.AfterOrEqual(clparams.ElectraVersion) { index, err := attestation.ElectraSingleCommitteeIndex() if err != nil { @@ -340,21 +338,22 @@ func (a *ApiHandler) PostEthV1BeaconPoolSyncCommittees(w http.ResponseWriter, r http.Error(w, err.Error(), http.StatusBadRequest) return } + var err error failures := []poolingFailure{} for idx, v := range msgs { - s, cn := a.syncedData.HeadState() - defer cn() - if s == nil { - http.Error(w, "node is not synced", http.StatusServiceUnavailable) - return - } - publishingSubnets, err := subnets.ComputeSubnetsForSyncCommittee(s, v.ValidatorIndex) - if err != nil { + var publishingSubnets []uint64 + if err := a.syncedData.ViewHeadState(func(headState *state.CachingBeaconState) error { + publishingSubnets, err = subnets.ComputeSubnetsForSyncCommittee(headState, v.ValidatorIndex) + if err != nil { + return err + } + return nil + }); err != nil { failures = append(failures, poolingFailure{Index: idx, Message: err.Error()}) continue } - cn() + for _, subnet := range publishingSubnets { if err = a.syncCommitteeMessagesService.ProcessMessage(r.Context(), &subnet, v); err != nil && !errors.Is(err, services.ErrIgnore) { log.Warn("[Beacon REST] failed to process attestation in syncCommittee service", "err", err) diff --git 
a/cl/beacon/handler/pool_test.go b/cl/beacon/handler/pool_test.go index 71726e0e300..66250fbc7b2 100644 --- a/cl/beacon/handler/pool_test.go +++ b/cl/beacon/handler/pool_test.go @@ -23,6 +23,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/log/v3" @@ -50,7 +51,10 @@ func TestPoolAttesterSlashings(t *testing.T) { _, _, _, _, _, handler, _, syncedDataMgr, _, _ := setupTestingHandler(t, clparams.Phase0Version, log.Root(), false) mockBeaconState := &state.CachingBeaconState{BeaconState: raw.New(&clparams.BeaconChainConfig{})} mockBeaconState.SetVersion(clparams.DenebVersion) - syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().HeadState().Return(mockBeaconState, synced_data.EmptyCancel).AnyTimes() + syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().ViewHeadState(gomock.Any()).DoAndReturn(func(vhsf synced_data.ViewHeadStateFn) error { + vhsf(mockBeaconState) + return nil + }).AnyTimes() server := httptest.NewServer(handler.mux) defer server.Close() @@ -103,8 +107,10 @@ func TestPoolProposerSlashings(t *testing.T) { _, _, _, _, _, handler, _, syncedDataMgr, _, _ := setupTestingHandler(t, clparams.Phase0Version, log.Root(), false) mockBeaconState := &state.CachingBeaconState{BeaconState: raw.New(&clparams.BeaconChainConfig{})} mockBeaconState.SetVersion(clparams.DenebVersion) - syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().HeadState().Return(mockBeaconState, synced_data.EmptyCancel).AnyTimes() - + syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().ViewHeadState(gomock.Any()).DoAndReturn(func(vhsf synced_data.ViewHeadStateFn) error { + vhsf(mockBeaconState) + return nil + }).AnyTimes() server := httptest.NewServer(handler.mux) defer server.Close() // json @@ -147,8 +153,10 @@ func TestPoolVoluntaryExits(t *testing.T) { _, _, _, _, _, handler, _, syncedDataMgr, _, _ := setupTestingHandler(t, clparams.Phase0Version, log.Root(), false) mockBeaconState := &state.CachingBeaconState{BeaconState: raw.New(&clparams.BeaconChainConfig{})} mockBeaconState.SetVersion(clparams.DenebVersion) - syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().HeadState().Return(mockBeaconState, synced_data.EmptyCancel).AnyTimes() - + syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().ViewHeadState(gomock.Any()).DoAndReturn(func(vhsf synced_data.ViewHeadStateFn) error { + vhsf(mockBeaconState) + return nil + }).AnyTimes() server := httptest.NewServer(handler.mux) defer server.Close() // json @@ -197,7 +205,10 @@ func TestPoolBlsToExecutionChainges(t *testing.T) { _, _, _, _, _, handler, _, syncedDataMgr, _, _ := setupTestingHandler(t, clparams.Phase0Version, log.Root(), false) mockBeaconState := &state.CachingBeaconState{BeaconState: raw.New(&clparams.BeaconChainConfig{})} mockBeaconState.SetVersion(clparams.DenebVersion) - syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().HeadState().Return(mockBeaconState, synced_data.EmptyCancel).AnyTimes() + syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().ViewHeadState(gomock.Any()).DoAndReturn(func(vhsf synced_data.ViewHeadStateFn) error { + vhsf(mockBeaconState) + return nil + }).AnyTimes() server := httptest.NewServer(handler.mux) defer server.Close() @@ -258,7 +269,10 @@ func TestPoolAggregatesAndProofs(t *testing.T) { _, _, _, _, _, handler, _, syncedDataMgr, _, _ := setupTestingHandler(t, clparams.Phase0Version, log.Root(), false) mockBeaconState := 
&state.CachingBeaconState{BeaconState: raw.New(&clparams.BeaconChainConfig{})} mockBeaconState.SetVersion(clparams.DenebVersion) - syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().HeadState().Return(mockBeaconState, synced_data.EmptyCancel).AnyTimes() + syncedDataMgr.(*sync_mock_services.MockSyncedData).EXPECT().ViewHeadState(gomock.Any()).DoAndReturn(func(vhsf synced_data.ViewHeadStateFn) error { + vhsf(mockBeaconState) + return nil + }).AnyTimes() server := httptest.NewServer(handler.mux) defer server.Close() diff --git a/cl/beacon/handler/subscription.go b/cl/beacon/handler/subscription.go index 87fa33deecd..63e187fd79c 100644 --- a/cl/beacon/handler/subscription.go +++ b/cl/beacon/handler/subscription.go @@ -31,6 +31,7 @@ import ( "github.com/erigontech/erigon/cl/beacon/beaconhttp" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/gossip" + "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/phase1/network/subnets" ) @@ -67,18 +68,22 @@ func (a *ApiHandler) PostEthV1ValidatorSyncCommitteeSubscriptions(w http.Respons syncnets = append(syncnets, uint64(i)) } } else { - headState, cn := a.syncedData.HeadState() - defer cn() - if headState == nil { - http.Error(w, "head state not available", http.StatusServiceUnavailable) - return - } - syncnets, err = subnets.ComputeSubnetsForSyncCommittee(headState, subRequest.ValidatorIndex) - if err != nil { + // headState, cn := a.syncedData.HeadState() + // defer cn() + // if headState == nil { + // http.Error(w, "head state not available", http.StatusServiceUnavailable) + // return + // } + if err := a.syncedData.ViewHeadState(func(headState *state.CachingBeaconState) error { + syncnets, err = subnets.ComputeSubnetsForSyncCommittee(headState, subRequest.ValidatorIndex) + if err != nil { + return err + } + return nil + }); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - cn() } // subscribe to subnets diff --git a/cl/beacon/handler/utils_test.go b/cl/beacon/handler/utils_test.go index 13d263570b1..83130f74b31 100644 --- a/cl/beacon/handler/utils_test.go +++ b/cl/beacon/handler/utils_test.go @@ -86,7 +86,7 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge fcu.Pool = opPool if useRealSyncDataMgr { - syncedData = synced_data.NewSyncedDataManager(true, &bcfg) + syncedData = synced_data.NewSyncedDataManager(&bcfg, true, 0) } else { syncedData = sync_mock_services.NewMockSyncedData(ctrl) } diff --git a/cl/beacon/handler/validators.go b/cl/beacon/handler/validators.go index 4cdcc6e6bc0..598501f8320 100644 --- a/cl/beacon/handler/validators.go +++ b/cl/beacon/handler/validators.go @@ -318,13 +318,12 @@ func (a *ApiHandler) writeValidatorsResponse( } if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. 
- s, cn := a.syncedData.HeadState() - defer cn() - if s == nil { + if err := a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error { + responseValidators(w, filterIndicies, statusFilters, state.Epoch(s), s.Balances(), s.Validators(), false, isOptimistic) + return nil + }); err != nil { http.Error(w, errors.New("node is not synced").Error(), http.StatusServiceUnavailable) - return } - responseValidators(w, filterIndicies, statusFilters, state.Epoch(s), s.Balances(), s.Validators(), false, isOptimistic) return } slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) @@ -456,15 +455,22 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt } if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. - s, cn := a.syncedData.HeadState() - defer cn() - if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("node is not synced")) - } - if s.ValidatorLength() <= int(validatorIndex) { - return newBeaconResponse([]int{}).WithFinalized(false), nil + var ( + resp *beaconhttp.BeaconResponse + err error + ) + if err := a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error { + if s.ValidatorLength() <= int(validatorIndex) { + resp = newBeaconResponse([]int{}).WithFinalized(false) + return nil + } + resp, err = responseValidator(validatorIndex, state.Epoch(s), s.Balances(), s.Validators(), false, isOptimistic) + return nil // return err later + }); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is not synced")) } - return responseValidator(validatorIndex, state.Epoch(s), s.Balances(), s.Validators(), false, isOptimistic) + + return resp, err } slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) if err != nil { @@ -578,13 +584,12 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr isOptimistic := a.forkchoiceStore.IsRootOptimistic(blockRoot) if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. - s, cn := a.syncedData.HeadState() - defer cn() - if s == nil { + if err := a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error { + responseValidatorsBalances(w, filterIndicies, s.Balances(), false, isOptimistic) + return nil + }); err != nil { http.Error(w, errors.New("node is not synced").Error(), http.StatusServiceUnavailable) - return } - responseValidatorsBalances(w, filterIndicies, s.Balances(), false, isOptimistic) return } slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) diff --git a/cl/beacon/synced_data/interface.go b/cl/beacon/synced_data/interface.go index 5539865c664..0cb84905d2a 100644 --- a/cl/beacon/synced_data/interface.go +++ b/cl/beacon/synced_data/interface.go @@ -18,18 +18,19 @@ package synced_data import ( "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon/cl/abstract" "github.com/erigontech/erigon/cl/phase1/core/state" ) type CancelFn func() +type ViewHeadStateFn func(headState *state.CachingBeaconState) error //go:generate mockgen -typed=true -destination=./mock_services/synced_data_mock.go -package=mock_services . 
SyncedData type SyncedData interface { OnHeadState(newState *state.CachingBeaconState) error - HeadState() (*state.CachingBeaconState, CancelFn) - HeadStateReader() (abstract.BeaconStateReader, CancelFn) + UnsetHeadState() + ViewHeadState(fn ViewHeadStateFn) error Syncing() bool HeadSlot() uint64 HeadRoot() common.Hash + CommitteeCount(epoch uint64) uint64 } diff --git a/cl/beacon/synced_data/mock_services/synced_data_mock.go b/cl/beacon/synced_data/mock_services/synced_data_mock.go index 557bb1e64c0..c89b8a7826e 100644 --- a/cl/beacon/synced_data/mock_services/synced_data_mock.go +++ b/cl/beacon/synced_data/mock_services/synced_data_mock.go @@ -13,7 +13,6 @@ import ( reflect "reflect" common "github.com/erigontech/erigon-lib/common" - abstract "github.com/erigontech/erigon/cl/abstract" synced_data "github.com/erigontech/erigon/cl/beacon/synced_data" state "github.com/erigontech/erigon/cl/phase1/core/state" gomock "go.uber.org/mock/gomock" @@ -42,6 +41,44 @@ func (m *MockSyncedData) EXPECT() *MockSyncedDataMockRecorder { return m.recorder } +// CommitteeCount mocks base method. +func (m *MockSyncedData) CommitteeCount(arg0 uint64) uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitteeCount", arg0) + ret0, _ := ret[0].(uint64) + return ret0 +} + +// CommitteeCount indicates an expected call of CommitteeCount. +func (mr *MockSyncedDataMockRecorder) CommitteeCount(arg0 any) *MockSyncedDataCommitteeCountCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeCount", reflect.TypeOf((*MockSyncedData)(nil).CommitteeCount), arg0) + return &MockSyncedDataCommitteeCountCall{Call: call} +} + +// MockSyncedDataCommitteeCountCall wrap *gomock.Call +type MockSyncedDataCommitteeCountCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSyncedDataCommitteeCountCall) Return(arg0 uint64) *MockSyncedDataCommitteeCountCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSyncedDataCommitteeCountCall) Do(f func(uint64) uint64) *MockSyncedDataCommitteeCountCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSyncedDataCommitteeCountCall) DoAndReturn(f func(uint64) uint64) *MockSyncedDataCommitteeCountCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // HeadRoot mocks base method. func (m *MockSyncedData) HeadRoot() common.Hash { m.ctrl.T.Helper() @@ -118,156 +155,152 @@ func (c *MockSyncedDataHeadSlotCall) DoAndReturn(f func() uint64) *MockSyncedDat return c } -// HeadState mocks base method. -func (m *MockSyncedData) HeadState() (*state.CachingBeaconState, synced_data.CancelFn) { +// OnHeadState mocks base method. +func (m *MockSyncedData) OnHeadState(arg0 *state.CachingBeaconState) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadState") - ret0, _ := ret[0].(*state.CachingBeaconState) - ret1, _ := ret[1].(synced_data.CancelFn) - return ret0, ret1 + ret := m.ctrl.Call(m, "OnHeadState", arg0) + ret0, _ := ret[0].(error) + return ret0 } -// HeadState indicates an expected call of HeadState. -func (mr *MockSyncedDataMockRecorder) HeadState() *MockSyncedDataHeadStateCall { +// OnHeadState indicates an expected call of OnHeadState. 
+func (mr *MockSyncedDataMockRecorder) OnHeadState(arg0 any) *MockSyncedDataOnHeadStateCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadState", reflect.TypeOf((*MockSyncedData)(nil).HeadState)) - return &MockSyncedDataHeadStateCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnHeadState", reflect.TypeOf((*MockSyncedData)(nil).OnHeadState), arg0) + return &MockSyncedDataOnHeadStateCall{Call: call} } -// MockSyncedDataHeadStateCall wrap *gomock.Call -type MockSyncedDataHeadStateCall struct { +// MockSyncedDataOnHeadStateCall wrap *gomock.Call +type MockSyncedDataOnHeadStateCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockSyncedDataHeadStateCall) Return(arg0 *state.CachingBeaconState, arg1 synced_data.CancelFn) *MockSyncedDataHeadStateCall { - c.Call = c.Call.Return(arg0, arg1) +func (c *MockSyncedDataOnHeadStateCall) Return(arg0 error) *MockSyncedDataOnHeadStateCall { + c.Call = c.Call.Return(arg0) return c } // Do rewrite *gomock.Call.Do -func (c *MockSyncedDataHeadStateCall) Do(f func() (*state.CachingBeaconState, synced_data.CancelFn)) *MockSyncedDataHeadStateCall { +func (c *MockSyncedDataOnHeadStateCall) Do(f func(*state.CachingBeaconState) error) *MockSyncedDataOnHeadStateCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockSyncedDataHeadStateCall) DoAndReturn(f func() (*state.CachingBeaconState, synced_data.CancelFn)) *MockSyncedDataHeadStateCall { +func (c *MockSyncedDataOnHeadStateCall) DoAndReturn(f func(*state.CachingBeaconState) error) *MockSyncedDataOnHeadStateCall { c.Call = c.Call.DoAndReturn(f) return c } -// HeadStateReader mocks base method. -func (m *MockSyncedData) HeadStateReader() (abstract.BeaconStateReader, synced_data.CancelFn) { +// Syncing mocks base method. +func (m *MockSyncedData) Syncing() bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadStateReader") - ret0, _ := ret[0].(abstract.BeaconStateReader) - ret1, _ := ret[1].(synced_data.CancelFn) - return ret0, ret1 + ret := m.ctrl.Call(m, "Syncing") + ret0, _ := ret[0].(bool) + return ret0 } -// HeadStateReader indicates an expected call of HeadStateReader. -func (mr *MockSyncedDataMockRecorder) HeadStateReader() *MockSyncedDataHeadStateReaderCall { +// Syncing indicates an expected call of Syncing. 
+func (mr *MockSyncedDataMockRecorder) Syncing() *MockSyncedDataSyncingCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadStateReader", reflect.TypeOf((*MockSyncedData)(nil).HeadStateReader)) - return &MockSyncedDataHeadStateReaderCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Syncing", reflect.TypeOf((*MockSyncedData)(nil).Syncing)) + return &MockSyncedDataSyncingCall{Call: call} } -// MockSyncedDataHeadStateReaderCall wrap *gomock.Call -type MockSyncedDataHeadStateReaderCall struct { +// MockSyncedDataSyncingCall wrap *gomock.Call +type MockSyncedDataSyncingCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockSyncedDataHeadStateReaderCall) Return(arg0 abstract.BeaconStateReader, arg1 synced_data.CancelFn) *MockSyncedDataHeadStateReaderCall { - c.Call = c.Call.Return(arg0, arg1) +func (c *MockSyncedDataSyncingCall) Return(arg0 bool) *MockSyncedDataSyncingCall { + c.Call = c.Call.Return(arg0) return c } // Do rewrite *gomock.Call.Do -func (c *MockSyncedDataHeadStateReaderCall) Do(f func() (abstract.BeaconStateReader, synced_data.CancelFn)) *MockSyncedDataHeadStateReaderCall { +func (c *MockSyncedDataSyncingCall) Do(f func() bool) *MockSyncedDataSyncingCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockSyncedDataHeadStateReaderCall) DoAndReturn(f func() (abstract.BeaconStateReader, synced_data.CancelFn)) *MockSyncedDataHeadStateReaderCall { +func (c *MockSyncedDataSyncingCall) DoAndReturn(f func() bool) *MockSyncedDataSyncingCall { c.Call = c.Call.DoAndReturn(f) return c } -// OnHeadState mocks base method. -func (m *MockSyncedData) OnHeadState(arg0 *state.CachingBeaconState) error { +// UnsetHeadState mocks base method. +func (m *MockSyncedData) UnsetHeadState() { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OnHeadState", arg0) - ret0, _ := ret[0].(error) - return ret0 + m.ctrl.Call(m, "UnsetHeadState") } -// OnHeadState indicates an expected call of OnHeadState. -func (mr *MockSyncedDataMockRecorder) OnHeadState(arg0 any) *MockSyncedDataOnHeadStateCall { +// UnsetHeadState indicates an expected call of UnsetHeadState. 
+func (mr *MockSyncedDataMockRecorder) UnsetHeadState() *MockSyncedDataUnsetHeadStateCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnHeadState", reflect.TypeOf((*MockSyncedData)(nil).OnHeadState), arg0) - return &MockSyncedDataOnHeadStateCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnsetHeadState", reflect.TypeOf((*MockSyncedData)(nil).UnsetHeadState)) + return &MockSyncedDataUnsetHeadStateCall{Call: call} } -// MockSyncedDataOnHeadStateCall wrap *gomock.Call -type MockSyncedDataOnHeadStateCall struct { +// MockSyncedDataUnsetHeadStateCall wrap *gomock.Call +type MockSyncedDataUnsetHeadStateCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockSyncedDataOnHeadStateCall) Return(arg0 error) *MockSyncedDataOnHeadStateCall { - c.Call = c.Call.Return(arg0) +func (c *MockSyncedDataUnsetHeadStateCall) Return() *MockSyncedDataUnsetHeadStateCall { + c.Call = c.Call.Return() return c } // Do rewrite *gomock.Call.Do -func (c *MockSyncedDataOnHeadStateCall) Do(f func(*state.CachingBeaconState) error) *MockSyncedDataOnHeadStateCall { +func (c *MockSyncedDataUnsetHeadStateCall) Do(f func()) *MockSyncedDataUnsetHeadStateCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockSyncedDataOnHeadStateCall) DoAndReturn(f func(*state.CachingBeaconState) error) *MockSyncedDataOnHeadStateCall { +func (c *MockSyncedDataUnsetHeadStateCall) DoAndReturn(f func()) *MockSyncedDataUnsetHeadStateCall { c.Call = c.Call.DoAndReturn(f) return c } -// Syncing mocks base method. -func (m *MockSyncedData) Syncing() bool { +// ViewHeadState mocks base method. +func (m *MockSyncedData) ViewHeadState(arg0 synced_data.ViewHeadStateFn) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Syncing") - ret0, _ := ret[0].(bool) + ret := m.ctrl.Call(m, "ViewHeadState", arg0) + ret0, _ := ret[0].(error) return ret0 } -// Syncing indicates an expected call of Syncing. -func (mr *MockSyncedDataMockRecorder) Syncing() *MockSyncedDataSyncingCall { +// ViewHeadState indicates an expected call of ViewHeadState. 
+func (mr *MockSyncedDataMockRecorder) ViewHeadState(arg0 any) *MockSyncedDataViewHeadStateCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Syncing", reflect.TypeOf((*MockSyncedData)(nil).Syncing)) - return &MockSyncedDataSyncingCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ViewHeadState", reflect.TypeOf((*MockSyncedData)(nil).ViewHeadState), arg0) + return &MockSyncedDataViewHeadStateCall{Call: call} } -// MockSyncedDataSyncingCall wrap *gomock.Call -type MockSyncedDataSyncingCall struct { +// MockSyncedDataViewHeadStateCall wrap *gomock.Call +type MockSyncedDataViewHeadStateCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockSyncedDataSyncingCall) Return(arg0 bool) *MockSyncedDataSyncingCall { +func (c *MockSyncedDataViewHeadStateCall) Return(arg0 error) *MockSyncedDataViewHeadStateCall { c.Call = c.Call.Return(arg0) return c } // Do rewrite *gomock.Call.Do -func (c *MockSyncedDataSyncingCall) Do(f func() bool) *MockSyncedDataSyncingCall { +func (c *MockSyncedDataViewHeadStateCall) Do(f func(synced_data.ViewHeadStateFn) error) *MockSyncedDataViewHeadStateCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockSyncedDataSyncingCall) DoAndReturn(f func() bool) *MockSyncedDataSyncingCall { +func (c *MockSyncedDataViewHeadStateCall) DoAndReturn(f func(synced_data.ViewHeadStateFn) error) *MockSyncedDataViewHeadStateCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/cl/beacon/synced_data/synced_data.go b/cl/beacon/synced_data/synced_data.go index 86ac04b1aa5..600d6d3898c 100644 --- a/cl/beacon/synced_data/synced_data.go +++ b/cl/beacon/synced_data/synced_data.go @@ -17,22 +17,26 @@ package synced_data import ( + "errors" "fmt" - "runtime/debug" "sync" "sync/atomic" "time" "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon/cl/abstract" + "github.com/erigontech/erigon-lib/common/dbg" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/phase1/core/state" ) -const EnableDeadlockDetector = true +var ErrNotSynced = errors.New("not synced") var _ SyncedData = (*SyncedDataManager)(nil) +func EmptyCancel() {} + +const MinHeadStateDelay = 600 * time.Millisecond + type SyncedDataManager struct { enabled bool cfg *clparams.BeaconChainConfig @@ -40,15 +44,17 @@ type SyncedDataManager struct { headRoot atomic.Value headSlot atomic.Uint64 - headState *state.CachingBeaconState + headState *state.CachingBeaconState + minHeadStateDelay time.Duration mu sync.RWMutex } -func NewSyncedDataManager(enabled bool, cfg *clparams.BeaconChainConfig) *SyncedDataManager { +func NewSyncedDataManager(cfg *clparams.BeaconChainConfig, enabled bool, minHeadStateDelay time.Duration) *SyncedDataManager { return &SyncedDataManager{ - enabled: enabled, - cfg: cfg, + enabled: enabled, + cfg: cfg, + minHeadStateDelay: minHeadStateDelay, } } @@ -61,7 +67,7 @@ func (s *SyncedDataManager) OnHeadState(newState *state.CachingBeaconState) (err defer s.mu.Unlock() var blkRoot common.Hash - + start := time.Now() if s.headState == nil { s.headState, err = newState.Copy() } else { @@ -76,46 +82,38 @@ func (s *SyncedDataManager) OnHeadState(newState *state.CachingBeaconState) (err } s.headSlot.Store(newState.Slot()) s.headRoot.Store(blkRoot) + took := time.Since(start) + // Delay head update to avoid being out of sync with slower nodes. 
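+	// OnHeadState therefore never returns faster than minHeadStateDelay: if copying
+	// the new head state took less than that, the remainder is slept off below. The
+	// package exposes MinHeadStateDelay (600ms) as a suggested value; the tests in
+	// this patch pass 0 to disable the throttle.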
+ if took < s.minHeadStateDelay { + time.Sleep(s.minHeadStateDelay - took) + } return err } -func EmptyCancel() {} - -func (s *SyncedDataManager) HeadState() (*state.CachingBeaconState, CancelFn) { +func (s *SyncedDataManager) ViewHeadState(fn ViewHeadStateFn) error { _, synced := s.headRoot.Load().(common.Hash) if !s.enabled || !synced { - return nil, EmptyCancel + return ErrNotSynced } - s.mu.RLock() - st := debug.Stack() - - ch := make(chan struct{}) - if EnableDeadlockDetector { + if dbg.CaplinSyncedDataMangerDeadlockDetection { + trace := dbg.Stack() + ch := make(chan struct{}) go func() { select { + case <-time.After(100 * time.Second): + fmt.Println("ViewHeadState timeout", trace) case <-ch: return - case <-time.After(100 * time.Second): - fmt.Println("Deadlock detected", string(st)) } }() + defer close(ch) } - - var mu sync.Once - - return s.headState, func() { - mu.Do(func() { - s.mu.RUnlock() - if EnableDeadlockDetector { - ch <- struct{}{} - } - }) + defer s.mu.RUnlock() + if err := fn(s.headState); err != nil { + return err } -} - -func (s *SyncedDataManager) HeadStateReader() (abstract.BeaconStateReader, CancelFn) { - return s.HeadState() + return nil } func (s *SyncedDataManager) Syncing() bool { @@ -140,3 +138,15 @@ func (s *SyncedDataManager) HeadRoot() common.Hash { } return root } + +func (s *SyncedDataManager) CommitteeCount(epoch uint64) uint64 { + return s.headState.CommitteeCount(epoch) +} + +func (s *SyncedDataManager) UnsetHeadState() { + s.mu.Lock() + defer s.mu.Unlock() + s.headRoot = atomic.Value{} + s.headSlot.Store(uint64(0)) + s.headState = nil +} diff --git a/cl/monitor/validator.go b/cl/monitor/validator.go index c64148ab468..eb7f9631cd9 100644 --- a/cl/monitor/validator.go +++ b/cl/monitor/validator.go @@ -123,20 +123,21 @@ func (m *validatorMonitorImpl) runReportProposerStatus() { ticker := time.NewTicker(time.Duration(m.beaconCfg.SecondsPerSlot) * time.Second) defer ticker.Stop() for range ticker.C { - headState, cn := m.syncedData.HeadState() + prevSlot := m.ethClock.GetCurrentSlot() - 1 - if headState == nil { - cn() + var proposerIndex uint64 + if err := m.syncedData.ViewHeadState(func(headState *state.CachingBeaconState) (err error) { + proposerIndex, err = headState.GetBeaconProposerIndexForSlot(prevSlot) + if err != nil { + return err + } + return nil + }); err != nil { + log.Warn("failed to get proposer index", "err", err, "slot", prevSlot) continue } // check proposer in previous slot - prevSlot := m.ethClock.GetCurrentSlot() - 1 - proposerIndex, err := headState.GetBeaconProposerIndexForSlot(prevSlot) - cn() - if err != nil { - log.Warn("failed to get proposer index", "slot", prevSlot, "err", err) - return - } + if status := m.vaidatorStatuses.getValidatorStatus(proposerIndex, prevSlot/m.beaconCfg.SlotsPerEpoch); status != nil { if status.proposeSlots.Contains(prevSlot) { metricProposerHit.AddInt(1) diff --git a/cl/phase1/core/state/accessors.go b/cl/phase1/core/state/accessors.go index c7d7304d467..e5245237ccc 100644 --- a/cl/phase1/core/state/accessors.go +++ b/cl/phase1/core/state/accessors.go @@ -120,7 +120,7 @@ func EligibleValidatorsIndicies(b abstract.BeaconState) (eligibleValidators []ui // We divide computation into multiple threads to speed up the process. 
numThreads := runtime.NumCPU() - wp := threading.CreateWorkerPool(numThreads) + wp := threading.NewParallelExecutor() eligibleValidatorsShards := make([][]uint64, numThreads) shardSize := b.ValidatorLength() / numThreads for i := range eligibleValidatorsShards { @@ -148,7 +148,7 @@ func EligibleValidatorsIndicies(b abstract.BeaconState) (eligibleValidators []ui return nil }) } - wp.WaitAndClose() + wp.Execute() // Merge the results from all threads. for i := range eligibleValidatorsShards { eligibleValidators = append(eligibleValidators, eligibleValidatorsShards[i]...) diff --git a/cl/phase1/core/state/cache.go b/cl/phase1/core/state/cache.go index 29bc6179bc5..a4da6a8e275 100644 --- a/cl/phase1/core/state/cache.go +++ b/cl/phase1/core/state/cache.go @@ -227,7 +227,7 @@ func (b *CachingBeaconState) _refreshActiveBalancesIfNeeded() { numWorkers := runtime.NumCPU() activeBalanceShards := make([]uint64, numWorkers) - wp := threading.CreateWorkerPool(numWorkers) + wp := threading.NewParallelExecutor() shardSize := b.ValidatorSet().Length() / numWorkers for i := 0; i < numWorkers; i++ { @@ -247,7 +247,7 @@ func (b *CachingBeaconState) _refreshActiveBalancesIfNeeded() { return nil }) } - wp.WaitAndClose() + wp.Execute() for _, shard := range activeBalanceShards { *b.totalActiveBalanceCache += shard diff --git a/cl/phase1/core/state/cache_accessors.go b/cl/phase1/core/state/cache_accessors.go index ea5a6461879..8b4794e97be 100644 --- a/cl/phase1/core/state/cache_accessors.go +++ b/cl/phase1/core/state/cache_accessors.go @@ -49,7 +49,7 @@ func (b *CachingBeaconState) GetActiveValidatorsIndices(epoch uint64) []uint64 { } numWorkers := runtime.NumCPU() - wp := threading.CreateWorkerPool(numWorkers) + wp := threading.NewParallelExecutor() indiciesShards := make([][]uint64, numWorkers) shardsJobSize := b.ValidatorLength() / numWorkers @@ -71,7 +71,7 @@ func (b *CachingBeaconState) GetActiveValidatorsIndices(epoch uint64) []uint64 { }) } - wp.WaitAndClose() + wp.Execute() for i := 0; i < numWorkers; i++ { indicies = append(indicies, indiciesShards[i]...) 
} @@ -273,10 +273,6 @@ func (b *CachingBeaconState) GetAttestationParticipationFlagIndicies( // GetBeaconCommitee grabs beacon committee using cache first func (b *CachingBeaconState) GetBeaconCommitee(slot, committeeIndex uint64) ([]uint64, error) { - // var cacheKey [16]byte - // binary.BigEndian.PutUint64(cacheKey[:], slot) - // binary.BigEndian.PutUint64(cacheKey[8:], committeeIndex) - epoch := GetEpochAtSlot(b.BeaconConfig(), slot) committeesPerSlot := b.CommitteeCount(epoch) indicies := b.GetActiveValidatorsIndices(epoch) diff --git a/cl/phase1/forkchoice/fork_choice_test.go b/cl/phase1/forkchoice/fork_choice_test.go index e7b074dee97..d484cbe03a1 100644 --- a/cl/phase1/forkchoice/fork_choice_test.go +++ b/cl/phase1/forkchoice/fork_choice_test.go @@ -67,7 +67,7 @@ func TestForkChoiceBasic(t *testing.T) { Root: libcommon.HexToHash("0x564d76d91f66c1fb2977484a6184efda2e1c26dd01992e048353230e10f83201"), Epoch: 0, } - sd := synced_data.NewSyncedDataManager(true, &clparams.MainnetBeaconConfig) + sd := synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true, 0) // Decode test blocks block0x3a, block0xc2, block0xd4 := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig, clparams.DenebVersion), cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig, clparams.DenebVersion), @@ -147,7 +147,7 @@ func TestForkChoiceChainBellatrix(t *testing.T) { // Initialize forkchoice store pool := pool.NewOperationsPool(&clparams.MainnetBeaconConfig) emitters := beaconevents.NewEventEmitter() - sd := synced_data.NewSyncedDataManager(true, &clparams.MainnetBeaconConfig) + sd := synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true, 0) store, err := forkchoice.NewForkChoiceStore(nil, anchorState, nil, pool, fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs(), beacon_router_configuration.RouterConfiguration{ Beacon: true, }, emitters), emitters, sd, nil, nil, false) diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go index ffe587cf052..8223a45d7a0 100644 --- a/cl/phase1/forkchoice/forkchoice.go +++ b/cl/phase1/forkchoice/forkchoice.go @@ -394,16 +394,6 @@ func (f *ForkChoiceStore) GetSyncCommittees(period uint64) (*solid.SyncCommittee return f.forkGraph.GetSyncCommittees(period) } -func (f *ForkChoiceStore) GetBeaconCommitee(slot, committeeIndex uint64) ([]uint64, error) { - headState, cn := f.syncedDataManager.HeadState() - defer cn() - if headState == nil { - return nil, nil - } - - return headState.GetBeaconCommitee(slot, committeeIndex) -} - func (f *ForkChoiceStore) BlockRewards(root libcommon.Hash) (*eth2.BlockRewardsCollector, bool) { return f.forkGraph.GetBlockRewards(root) } diff --git a/cl/phase1/forkchoice/interface.go b/cl/phase1/forkchoice/interface.go index 45abff2b7ba..fcee6a3a83d 100644 --- a/cl/phase1/forkchoice/interface.go +++ b/cl/phase1/forkchoice/interface.go @@ -54,7 +54,6 @@ type ForkChoiceStorageReader interface { blockRoot libcommon.Hash, ) (solid.Checkpoint, solid.Checkpoint, solid.Checkpoint, bool) GetSyncCommittees(period uint64) (*solid.SyncCommittee, *solid.SyncCommittee, bool) - GetBeaconCommitee(slot, committeeIndex uint64) ([]uint64, error) Slot() uint64 Time() uint64 Participation(epoch uint64) (*solid.ParticipationBitList, bool) diff --git a/cl/phase1/forkchoice/mock_services/forkchoice_mock.go b/cl/phase1/forkchoice/mock_services/forkchoice_mock.go index e108e59f4f3..a6bae30313f 100644 --- a/cl/phase1/forkchoice/mock_services/forkchoice_mock.go +++ 
b/cl/phase1/forkchoice/mock_services/forkchoice_mock.go @@ -212,13 +212,6 @@ func (f *ForkChoiceStorageMock) GetSyncCommittees( f.GetSyncCommitteesVal[period][1] != nil } -func (f *ForkChoiceStorageMock) GetBeaconCommitee(slot, committeeIndex uint64) ([]uint64, error) { - if f.GetBeaconCommitteeMock != nil { - return f.GetBeaconCommitteeMock(slot, committeeIndex) - } - return []uint64{1, 2, 3, 4, 5, 6, 7, 8}, nil -} - func (f *ForkChoiceStorageMock) Slot() uint64 { return f.SlotVal } diff --git a/cl/phase1/forkchoice/on_attestation.go b/cl/phase1/forkchoice/on_attestation.go index 16111f2dd6e..2739866f2ec 100644 --- a/cl/phase1/forkchoice/on_attestation.go +++ b/cl/phase1/forkchoice/on_attestation.go @@ -56,20 +56,24 @@ func (f *ForkChoiceStore) OnAttestation( return err } } - headState, cn := f.syncedDataManager.HeadState() - defer cn() + var attestationIndicies []uint64 var err error target := data.Target - if headState == nil { + if f.syncedDataManager.Syncing() { attestationIndicies, err = f.verifyAttestationWithCheckpointState( target, attestation, fromBlock, ) } else { - attestationIndicies, err = f.verifyAttestationWithState(headState, attestation, fromBlock) + if err := f.syncedDataManager.ViewHeadState(func(headState *state.CachingBeaconState) error { + attestationIndicies, err = f.verifyAttestationWithState(headState, attestation, fromBlock) + return err + }); err != nil { + return err + } } if err != nil { return err diff --git a/cl/phase1/forkchoice/on_attester_slashing.go b/cl/phase1/forkchoice/on_attester_slashing.go index fb29e466531..9879ffece91 100644 --- a/cl/phase1/forkchoice/on_attester_slashing.go +++ b/cl/phase1/forkchoice/on_attester_slashing.go @@ -35,25 +35,32 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS } f.mu.Lock() defer f.mu.Unlock() - // Check if this attestation is even slashable. - attestation1 := attesterSlashing.Attestation_1 - attestation2 := attesterSlashing.Attestation_2 - if !cltypes.IsSlashableAttestationData(attestation1.Data, attestation2.Data) { - return errors.New("attestation data is not slashable") - } - var err error - s, cn := f.syncedDataManager.HeadState() - defer cn() - if s == nil { - // Retrieve justified state - s, err = f.forkGraph.GetState(f.justifiedCheckpoint.Load().(solid.Checkpoint).Root, false) + + if f.syncedDataManager.Syncing() { + s, err := f.forkGraph.GetState(f.justifiedCheckpoint.Load().(solid.Checkpoint).Root, false) if err != nil { return err } + return f.onProcessAttesterSlashing(attesterSlashing, s, test) } + + return f.syncedDataManager.ViewHeadState(func(s *state.CachingBeaconState) error { + return f.onProcessAttesterSlashing(attesterSlashing, s, test) + }) +} + +func (f *ForkChoiceStore) onProcessAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, s *state.CachingBeaconState, test bool) error { if s == nil { return errors.New("no state accessible") } + + // Check if this attestation is even slashable. 
+ attestation1 := attesterSlashing.Attestation_1 + attestation2 := attesterSlashing.Attestation_2 + if !cltypes.IsSlashableAttestationData(attestation1.Data, attestation2.Data) { + return errors.New("attestation data is not slashable") + } + attestation1PublicKeys, err := getIndexedAttestationPublicKeys(s, attestation1) if err != nil { return err diff --git a/cl/phase1/network/gossip_manager.go b/cl/phase1/network/gossip_manager.go index 9c9089d85f9..8e7d2d27a41 100644 --- a/cl/phase1/network/gossip_manager.go +++ b/cl/phase1/network/gossip_manager.go @@ -30,6 +30,7 @@ import ( sentinel "github.com/erigontech/erigon-lib/gointerfaces/sentinelproto" "github.com/erigontech/erigon-lib/types/ssz" "github.com/erigontech/erigon/cl/beacon/beaconevents" + "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" @@ -142,7 +143,7 @@ func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l if err := g.routeAndProcess(ctx, data); err != nil { return err } - if errors.Is(err, services.ErrIgnore) { + if errors.Is(err, services.ErrIgnore) || errors.Is(err, synced_data.ErrNotSynced) { return nil } if err != nil { diff --git a/cl/phase1/network/services/aggregate_and_proof_service.go b/cl/phase1/network/services/aggregate_and_proof_service.go index 2e72199cc3e..f511805ab75 100644 --- a/cl/phase1/network/services/aggregate_and_proof_service.go +++ b/cl/phase1/network/services/aggregate_and_proof_service.go @@ -98,11 +98,7 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( subnet *uint64, aggregateAndProof *cltypes.SignedAggregateAndProofData, ) error { - headState, cn := a.syncedDataManager.HeadState() - defer cn() - if headState == nil { - return ErrIgnore - } + selectionProof := aggregateAndProof.SignedAggregateAndProof.Message.SelectionProof aggregateData := aggregateAndProof.SignedAggregateAndProof.Message.Aggregate.Data aggregate := aggregateAndProof.SignedAggregateAndProof.Message.Aggregate @@ -110,10 +106,11 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( slot := aggregateAndProof.SignedAggregateAndProof.Message.Aggregate.Data.Slot committeeIndex := aggregateAndProof.SignedAggregateAndProof.Message.Aggregate.Data.CommitteeIndex - if aggregateData.Slot > headState.Slot() { + if aggregateData.Slot > a.syncedDataManager.HeadSlot() { a.scheduleAggregateForLaterProcessing(aggregateAndProof) return ErrIgnore } + epoch := slot / a.beaconCfg.SlotsPerEpoch clversion := a.beaconCfg.GetCurrentStateVersion(epoch) if clversion.AfterOrEqual(clparams.ElectraVersion) { @@ -123,90 +120,103 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( } committeeIndex = index } - // [IGNORE] the epoch of aggregate.data.slot is either the current or previous epoch (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. compute_epoch_at_slot(aggregate.data.slot) in (get_previous_epoch(state), get_current_epoch(state)) - if state.PreviousEpoch(headState) != epoch && state.Epoch(headState) != epoch { - return ErrIgnore - } - // [REJECT] The committee index is within the expected range -- i.e. index < get_committee_count_per_slot(state, aggregate.data.target.epoch). - committeeCountPerSlot := headState.CommitteeCount(target.Epoch) - if committeeIndex >= committeeCountPerSlot { - return errors.New("invalid committee index in aggregate and proof") - } - // [REJECT] The aggregate attestation's epoch matches its target -- i.e. 
aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot) - if aggregateData.Target.Epoch != epoch { - return errors.New("invalid target epoch in aggregate and proof") - } - finalizedCheckpoint := a.forkchoiceStore.FinalizedCheckpoint() - finalizedSlot := finalizedCheckpoint.Epoch * a.beaconCfg.SlotsPerEpoch - // [IGNORE] The current finalized_checkpoint is an ancestor of the block defined by aggregate.data.beacon_block_root -- i.e. get_checkpoint_block(store, aggregate.data.beacon_block_root, finalized_checkpoint.epoch) == store.finalized_checkpoint.root - if a.forkchoiceStore.Ancestor( - aggregateData.BeaconBlockRoot, - finalizedSlot, - ) != finalizedCheckpoint.Root { - return ErrIgnore - } + var ( + aggregateVerificationData *AggregateVerificationData + attestingIndices []uint64 + seenIndex seenAggregateIndex + ) + if err := a.syncedDataManager.ViewHeadState(func(headState *state.CachingBeaconState) error { + // [IGNORE] the epoch of aggregate.data.slot is either the current or previous epoch (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. compute_epoch_at_slot(aggregate.data.slot) in (get_previous_epoch(state), get_current_epoch(state)) + if state.PreviousEpoch(headState) != epoch && state.Epoch(headState) != epoch { + return ErrIgnore + } - // [IGNORE] The block being voted for (aggregate.data.beacon_block_root) has been seen (via both gossip and non-gossip sources) (a client MAY queue aggregates for processing once block is retrieved). - if _, ok := a.forkchoiceStore.GetHeader(aggregateData.BeaconBlockRoot); !ok { - return ErrIgnore - } + // [REJECT] The committee index is within the expected range -- i.e. index < get_committee_count_per_slot(state, aggregate.data.target.epoch). + committeeCountPerSlot := headState.CommitteeCount(target.Epoch) + if committeeIndex >= committeeCountPerSlot { + return errors.New("invalid committee index in aggregate and proof") + } + // [REJECT] The aggregate attestation's epoch matches its target -- i.e. aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot) + if aggregateData.Target.Epoch != epoch { + return errors.New("invalid target epoch in aggregate and proof") + } + finalizedCheckpoint := a.forkchoiceStore.FinalizedCheckpoint() + finalizedSlot := finalizedCheckpoint.Epoch * a.beaconCfg.SlotsPerEpoch + // [IGNORE] The current finalized_checkpoint is an ancestor of the block defined by aggregate.data.beacon_block_root -- i.e. get_checkpoint_block(store, aggregate.data.beacon_block_root, finalized_checkpoint.epoch) == store.finalized_checkpoint.root + if a.forkchoiceStore.Ancestor( + aggregateData.BeaconBlockRoot, + finalizedSlot, + ) != finalizedCheckpoint.Root { + return ErrIgnore + } - // [IGNORE] The aggregate is the first valid aggregate received for the aggregator with index aggregate_and_proof.aggregator_index for the epoch aggregate.data.target.epoch - seenIndex := seenAggregateIndex{ - epoch: target.Epoch, - index: aggregateAndProof.SignedAggregateAndProof.Message.AggregatorIndex, - } - if a.seenAggreatorIndexes.Contains(seenIndex) { - return ErrIgnore - } + // [IGNORE] The block being voted for (aggregate.data.beacon_block_root) has been seen (via both gossip and non-gossip sources) (a client MAY queue aggregates for processing once block is retrieved). 
+ if _, ok := a.forkchoiceStore.GetHeader(aggregateData.BeaconBlockRoot); !ok { + return ErrIgnore + } - committee, err := headState.GetBeaconCommitee(slot, committeeIndex) - if err != nil { - return err - } - // [REJECT] The attestation has participants -- that is, len(get_attesting_indices(state, aggregate)) >= 1 - attestingIndices, err := headState.GetAttestingIndicies(aggregate, false) - if err != nil { - return err - } - if len(attestingIndices) == 0 { - return errors.New("no attesting indicies") - } + // [IGNORE] The aggregate is the first valid aggregate received for the aggregator with index aggregate_and_proof.aggregator_index for the epoch aggregate.data.target.epoch + seenIndex = seenAggregateIndex{ + epoch: target.Epoch, + index: aggregateAndProof.SignedAggregateAndProof.Message.AggregatorIndex, + } + if a.seenAggreatorIndexes.Contains(seenIndex) { + return ErrIgnore + } - monitor.ObserveNumberOfAggregateSignatures(len(attestingIndices)) + committee, err := headState.GetBeaconCommitee(slot, committeeIndex) + if err != nil { + return err + } + // [REJECT] The attestation has participants -- that is, len(get_attesting_indices(state, aggregate)) >= 1 + attestingIndices, err = headState.GetAttestingIndicies(aggregate, false) + if err != nil { + return err + } + if len(attestingIndices) == 0 { + return errors.New("no attesting indicies") + } - // [REJECT] The aggregator's validator index is within the committee -- i.e. aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, index). - if !slices.Contains(committee, aggregateAndProof.SignedAggregateAndProof.Message.AggregatorIndex) { - return errors.New("committee index not in committee") - } - // [REJECT] The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e. get_checkpoint_block(store, aggregate.data.beacon_block_root, aggregate.data.target.epoch) == aggregate.data.target.root - if a.forkchoiceStore.Ancestor( - aggregateData.BeaconBlockRoot, - target.Epoch*a.beaconCfg.SlotsPerEpoch, - ) != target.Root { - return errors.New("invalid target block") - } - if a.test { - return nil - } + // [REJECT] The aggregator's validator index is within the committee -- i.e. aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, index). + if !slices.Contains(committee, aggregateAndProof.SignedAggregateAndProof.Message.AggregatorIndex) { + return errors.New("committee index not in committee") + } + // [REJECT] The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e. get_checkpoint_block(store, aggregate.data.beacon_block_root, aggregate.data.target.epoch) == aggregate.data.target.root + if a.forkchoiceStore.Ancestor( + aggregateData.BeaconBlockRoot, + target.Epoch*a.beaconCfg.SlotsPerEpoch, + ) != target.Root { + return errors.New("invalid target block") + } + if a.test { + return nil + } - // [REJECT] aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot -- i.e. is_aggregator(state, aggregate.data.slot, index, aggregate_and_proof.selection_proof) returns True. - if !state.IsAggregator(a.beaconCfg, uint64(len(committee)), committeeIndex, selectionProof) { - log.Warn("receveived aggregate and proof from invalid aggregator") - return errors.New("invalid aggregate and proof") - } + // [REJECT] aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot -- i.e. 
is_aggregator(state, aggregate.data.slot, index, aggregate_and_proof.selection_proof) returns True. + if !state.IsAggregator(a.beaconCfg, uint64(len(committee)), committeeIndex, selectionProof) { + log.Warn("receveived aggregate and proof from invalid aggregator") + return errors.New("invalid aggregate and proof") + } - // aggregate signatures for later verification - aggregateVerificationData, err := GetSignaturesOnAggregate(headState, aggregateAndProof.SignedAggregateAndProof, attestingIndices) - if err != nil { + // aggregate signatures for later verification + aggregateVerificationData, err = GetSignaturesOnAggregate(headState, aggregateAndProof.SignedAggregateAndProof, attestingIndices) + if err != nil { + return err + } + + monitor.ObserveNumberOfAggregateSignatures(len(attestingIndices)) + monitor.ObserveAggregateQuality(len(attestingIndices), len(committee)) + monitor.ObserveCommitteeSize(float64(len(committee))) + return nil + }); err != nil { return err } - + if a.test { + return nil + } // further processing will be done after async signature verification aggregateVerificationData.F = func() { - monitor.ObserveAggregateQuality(len(attestingIndices), len(committee)) - monitor.ObserveCommitteeSize(float64(len(committee))) a.opPool.AttestationsPool.Insert( aggregateAndProof.SignedAggregateAndProof.Message.Aggregate.Signature, aggregateAndProof.SignedAggregateAndProof.Message.Aggregate, @@ -227,11 +237,8 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( // push the signatures to verify asynchronously and run final functions after that. a.batchSignatureVerifier.AsyncVerifyAggregateProof(aggregateVerificationData) - // As the logic goes, if we return ErrIgnore there will be no peer banning and further publishing - // gossip data into the network by the gossip manager. That's what we want because we will be doing that ourselves - // in BatchVerification function. After validating signatures, if they are valid we will publish the - // gossip ourselves or ban the peer which sent that particular invalid signature. 
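+	// Returning ErrIgnore here intentionally skips peer banning and gossip
+	// republishing by the gossip manager: the batch signature verifier republishes
+	// the aggregate or bans the sending peer itself once the asynchronous
+	// verification kicked off by AsyncVerifyAggregateProof completes.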
return ErrIgnore + } func GetSignaturesOnAggregate( @@ -366,23 +373,24 @@ func (a *aggregateAndProofServiceImpl) scheduleAggregateForLaterProcessing( func (a *aggregateAndProofServiceImpl) loop(ctx context.Context) { ticker := time.NewTicker(attestationJobsIntervalTick) defer ticker.Stop() + keysToDel := make([][32]byte, 0) for { select { case <-ctx.Done(): return case <-ticker.C: } - + keysToDel = keysToDel[:0] a.aggregatesScheduledForLaterExecution.Range(func(key, value any) bool { if a.syncedDataManager.Syncing() { // Discard the job if we can't get the head state - a.aggregatesScheduledForLaterExecution.Delete(key.([32]byte)) + keysToDel = append(keysToDel, key.([32]byte)) return false } job := value.(*aggregateJob) // check if it has expired if time.Since(job.creationTime) > attestationJobExpiry { - a.aggregatesScheduledForLaterExecution.Delete(key.([32]byte)) + keysToDel = append(keysToDel, key.([32]byte)) return true } aggregateData := job.aggregate.SignedAggregateAndProof.Message.Aggregate.Data @@ -394,8 +402,11 @@ func (a *aggregateAndProofServiceImpl) loop(ctx context.Context) { return true } - a.aggregatesScheduledForLaterExecution.Delete(key.([32]byte)) + keysToDel = append(keysToDel, key.([32]byte)) return true }) + for _, key := range keysToDel { + a.aggregatesScheduledForLaterExecution.Delete(key) + } } } diff --git a/cl/phase1/network/services/aggregate_and_proof_service_test.go b/cl/phase1/network/services/aggregate_and_proof_service_test.go index 55affb37a0f..9e91f760860 100644 --- a/cl/phase1/network/services/aggregate_and_proof_service_test.go +++ b/cl/phase1/network/services/aggregate_and_proof_service_test.go @@ -18,6 +18,7 @@ package services import ( "context" + "sync" "testing" "github.com/stretchr/testify/require" @@ -73,7 +74,7 @@ func setupAggregateAndProofTest(t *testing.T) (AggregateAndProofService, *synced ctx, cn := context.WithCancel(context.Background()) cn() cfg := &clparams.MainnetBeaconConfig - syncedDataManager := synced_data.NewSyncedDataManager(true, cfg) + syncedDataManager := synced_data.NewSyncedDataManager(cfg, true, 0) forkchoiceMock := mock_services.NewForkChoiceStorageMock(t) p := pool.OperationsPool{} p.AttestationsPool = pool.NewOperationPool[libcommon.Bytes96, *solid.Attestation](100, "test") @@ -200,3 +201,15 @@ func TestAggregateAndProofSuccess(t *testing.T) { fcu.Headers[agg.SignedAggregateAndProof.Message.Aggregate.Data.BeaconBlockRoot] = &cltypes.BeaconBlockHeader{} require.NoError(t, aggService.ProcessMessage(context.Background(), nil, agg)) } + +func TestSyncMapRangeDeadlock(t *testing.T) { + var m sync.Map + m.Store(1, 1) + m.Store(2, 2) + m.Store(3, 3) + + m.Range(func(key, value any) bool { + m.Store(4, 5) + return true + }) +} diff --git a/cl/phase1/network/services/attestation_service.go b/cl/phase1/network/services/attestation_service.go index 5d944c5920e..d964fef2cc8 100644 --- a/cl/phase1/network/services/attestation_service.go +++ b/cl/phase1/network/services/attestation_service.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "github.com/erigontech/erigon-lib/common" sentinel "github.com/erigontech/erigon-lib/gointerfaces/sentinelproto" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cl/aggregation" @@ -32,6 +33,7 @@ import ( "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/fork" "github.com/erigontech/erigon/cl/monitor" + "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/phase1/core/state/lru" 
"github.com/erigontech/erigon/cl/phase1/forkchoice" "github.com/erigontech/erigon/cl/phase1/network/subnets" @@ -127,26 +129,6 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, } s.attestationProcessed.Add(key, struct{}{}) - beaconCommittee, err := s.forkchoiceStore.GetBeaconCommitee(slot, committeeIndex) - if err != nil { - return err - } - headState, cn := s.syncedDataManager.HeadStateReader() - defer cn() - - if headState == nil { - return ErrIgnore - } - // [REJECT] The committee index is within the expected range - committeeCount := computeCommitteeCountPerSlot(headState, slot, s.beaconCfg.SlotsPerEpoch) - if committeeIndex >= committeeCount { - return fmt.Errorf("committee index out of range, %d >= %d", committeeIndex, committeeCount) - } - // [REJECT] The attestation is for the correct subnet -- i.e. compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, index) == subnet_id - subnetId := computeSubnetForAttestation(committeeCount, slot, committeeIndex, s.beaconCfg.SlotsPerEpoch, s.netCfg.AttestationSubnetCount) - if subnet == nil || subnetId != *subnet { - return errors.New("wrong subnet") - } // [IGNORE] attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- // i.e. attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot (a client MAY queue future attestations for processing at the appropriate slot). currentSlot := s.ethClock.GetCurrentSlot() @@ -158,59 +140,85 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, return errors.New("epoch mismatch") } - // [REJECT] The number of aggregation bits matches the committee size -- i.e. len(aggregation_bits) == len(get_beacon_committee(state, attestation.data.slot, index)). - bits := att.Attestation.AggregationBits.Bytes() - expectedAggregationBitsLength := len(beaconCommittee) - actualAggregationBitsLength := utils.GetBitlistLength(bits) - if actualAggregationBitsLength != expectedAggregationBitsLength { - return fmt.Errorf("aggregation bits count mismatch: %d != %d", actualAggregationBitsLength, expectedAggregationBitsLength) - } + var ( + domain []byte + pubKey common.Bytes48 + signature common.Bytes96 + ) + if err := s.syncedDataManager.ViewHeadState(func(headState *state.CachingBeaconState) error { + // [REJECT] The committee index is within the expected range + committeeCount := computeCommitteeCountPerSlot(headState, slot, s.beaconCfg.SlotsPerEpoch) + if committeeIndex >= committeeCount { + return fmt.Errorf("committee index out of range, %d >= %d", committeeIndex, committeeCount) + } + // [REJECT] The attestation is for the correct subnet -- i.e. compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, index) == subnet_id + subnetId := computeSubnetForAttestation(committeeCount, slot, committeeIndex, s.beaconCfg.SlotsPerEpoch, s.netCfg.AttestationSubnetCount) + if subnet == nil || subnetId != *subnet { + return errors.New("wrong subnet") + } + + beaconCommittee, err := headState.GetBeaconCommitee(slot, committeeIndex) + if err != nil { + return err + } + + // [REJECT] The number of aggregation bits matches the committee size -- i.e. len(aggregation_bits) == len(get_beacon_committee(state, attestation.data.slot, index)). 
+ bits := att.Attestation.AggregationBits.Bytes() + expectedAggregationBitsLength := len(beaconCommittee) + actualAggregationBitsLength := utils.GetBitlistLength(bits) + if actualAggregationBitsLength != expectedAggregationBitsLength { + return fmt.Errorf("aggregation bits count mismatch: %d != %d", actualAggregationBitsLength, expectedAggregationBitsLength) + } - //[REJECT] The attestation is unaggregated -- that is, it has exactly one participating validator (len([bit for bit in aggregation_bits if bit]) == 1, i.e. exactly 1 bit is set). - setBits := 0 - onBitIndex := 0 // Aggregationbits is []byte, so we need to iterate over all bits. - for i := 0; i < len(bits); i++ { - for j := 0; j < 8; j++ { - if bits[i]&(1<= len(beaconCommittee) { - continue + //[REJECT] The attestation is unaggregated -- that is, it has exactly one participating validator (len([bit for bit in aggregation_bits if bit]) == 1, i.e. exactly 1 bit is set). + setBits := 0 + onBitIndex := 0 // Aggregationbits is []byte, so we need to iterate over all bits. + for i := 0; i < len(bits); i++ { + for j := 0; j < 8; j++ { + if bits[i]&(1<= len(beaconCommittee) { + continue + } + setBits++ + onBitIndex = i*8 + j } - setBits++ - onBitIndex = i*8 + j } } - } - if setBits == 0 { - return ErrIgnore // Ignore if it is just an empty bitlist - } - if setBits != 1 { - return errors.New("attestation does not have exactly one participating validator") - } - // [IGNORE] There has been no other valid attestation seen on an attestation subnet that has an identical attestation.data.target.epoch and participating validator index. - if err != nil { - return err - } - if onBitIndex >= len(beaconCommittee) { - return errors.New("on bit index out of committee range") - } - // mark the validator as seen - vIndex := beaconCommittee[onBitIndex] - epochLastTime, ok := s.validatorAttestationSeen.Get(vIndex) - if ok && epochLastTime == targetEpoch { - return fmt.Errorf("validator already seen in target epoch %w", ErrIgnore) - } - s.validatorAttestationSeen.Add(vIndex, targetEpoch) + if setBits == 0 { + return ErrIgnore // Ignore if it is just an empty bitlist + } + if setBits != 1 { + return errors.New("attestation does not have exactly one participating validator") + } + // [IGNORE] There has been no other valid attestation seen on an attestation subnet that has an identical attestation.data.target.epoch and participating validator index. + if err != nil { + return err + } + if onBitIndex >= len(beaconCommittee) { + return errors.New("on bit index out of committee range") + } + // mark the validator as seen + vIndex := beaconCommittee[onBitIndex] + epochLastTime, ok := s.validatorAttestationSeen.Get(vIndex) + if ok && epochLastTime == targetEpoch { + return fmt.Errorf("validator already seen in target epoch %w", ErrIgnore) + } + s.validatorAttestationSeen.Add(vIndex, targetEpoch) - // [REJECT] The signature of attestation is valid. - signature := att.Attestation.Signature - pubKey, err := headState.ValidatorPublicKey(int(beaconCommittee[onBitIndex])) - if err != nil { - return fmt.Errorf("unable to get public key: %v", err) - } - domain, err := headState.GetDomain(s.beaconCfg.DomainBeaconAttester, targetEpoch) - if err != nil { - return fmt.Errorf("unable to get the domain: %v", err) + // [REJECT] The signature of attestation is valid. 
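+		// Only the inputs needed for verification (signature, public key, signing
+		// domain) are captured inside the closure; the signing root is computed and
+		// the signature checked after ViewHeadState returns, so the head-state read
+		// lock is not held during the verification itself.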
+ signature = att.Attestation.Signature + pubKey, err = headState.ValidatorPublicKey(int(beaconCommittee[onBitIndex])) + if err != nil { + return fmt.Errorf("unable to get public key: %v", err) + } + domain, err = headState.GetDomain(s.beaconCfg.DomainBeaconAttester, targetEpoch) + if err != nil { + return fmt.Errorf("unable to get the domain: %v", err) + } + return nil + }); err != nil { + return err } signingRoot, err := computeSigningRoot(att.Attestation.Data, domain) if err != nil { diff --git a/cl/phase1/network/services/attestation_service_test.go b/cl/phase1/network/services/attestation_service_test.go index e68984e7b9e..f5fe793990e 100644 --- a/cl/phase1/network/services/attestation_service_test.go +++ b/cl/phase1/network/services/attestation_service_test.go @@ -28,10 +28,9 @@ import ( "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/types/ssz" "github.com/erigontech/erigon/cl/abstract" - mockState "github.com/erigontech/erigon/cl/abstract/mock_services" + "github.com/erigontech/erigon/cl/antiquary/tests" "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" - mockSync "github.com/erigontech/erigon/cl/beacon/synced_data/mock_services" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" @@ -63,8 +62,7 @@ type attestationTestSuite struct { suite.Suite gomockCtrl *gomock.Controller mockForkChoice *mock_services.ForkChoiceStorageMock - syncedData *mockSync.MockSyncedData - beaconStateReader *mockState.MockBeaconStateReader + syncedData synced_data.SyncedData committeeSubscibe *mockCommittee.MockCommitteeSubscribe ethClock *eth_clock.MockEthereumClock attService AttestationService @@ -74,8 +72,9 @@ type attestationTestSuite struct { func (t *attestationTestSuite) SetupTest() { t.gomockCtrl = gomock.NewController(t.T()) t.mockForkChoice = &mock_services.ForkChoiceStorageMock{} - t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) - t.beaconStateReader = mockState.NewMockBeaconStateReader(t.gomockCtrl) + _, st, _ := tests.GetBellatrixRandom() + t.syncedData = synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true, 0) + t.syncedData.OnHeadState(st) t.committeeSubscibe = mockCommittee.NewMockCommitteeSubscribe(t.gomockCtrl) t.ethClock = eth_clock.NewMockEthereumClock(t.gomockCtrl) t.beaconConfig = &clparams.BeaconChainConfig{SlotsPerEpoch: mockSlotsPerEpoch} @@ -108,11 +107,11 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "Test attestation with committee index out of range", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 1 } t.ethClock.EXPECT().GetEpochAtSlot(mockSlot).Return(mockEpoch).Times(1) + t.ethClock.EXPECT().GetCurrentSlot().Return(mockSlot).Times(1) }, args: args{ ctx: context.Background(), @@ -124,13 +123,13 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "Test attestation with wrong subnet", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { return 2 } + t.ethClock.EXPECT().GetCurrentSlot().Return(mockSlot).Times(1) 
t.ethClock.EXPECT().GetEpochAtSlot(mockSlot).Return(mockEpoch).Times(1) }, args: args{ @@ -143,7 +142,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "Test attestation with wrong slot (current_slot < slot)", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } @@ -163,7 +161,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "Attestation is aggregated", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } @@ -187,7 +184,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "Attestation is empty", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } @@ -211,7 +207,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "invalid signature", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } @@ -220,8 +215,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { } t.ethClock.EXPECT().GetEpochAtSlot(mockSlot).Return(mockEpoch).Times(1) t.ethClock.EXPECT().GetCurrentSlot().Return(mockSlot).Times(1) - t.beaconStateReader.EXPECT().ValidatorPublicKey(gomock.Any()).Return(common.Bytes48{}, nil).Times(1) - t.beaconStateReader.EXPECT().GetDomain(t.beaconConfig.DomainBeaconAttester, att.Data.Target.Epoch).Return([]byte{}, nil).Times(1) computeSigningRoot = func(obj ssz.HashableSSZ, domain []byte) ([32]byte, error) { return [32]byte{}, nil } @@ -236,7 +229,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "block header not found", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 8 } @@ -245,8 +237,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { } t.ethClock.EXPECT().GetEpochAtSlot(mockSlot).Return(mockEpoch).Times(1) t.ethClock.EXPECT().GetCurrentSlot().Return(mockSlot).Times(1) - t.beaconStateReader.EXPECT().ValidatorPublicKey(gomock.Any()).Return(common.Bytes48{}, nil).Times(1) - t.beaconStateReader.EXPECT().GetDomain(t.beaconConfig.DomainBeaconAttester, att.Data.Target.Epoch).Return([]byte{}, nil).Times(1) computeSigningRoot = func(obj ssz.HashableSSZ, domain []byte) ([32]byte, error) { return [32]byte{}, nil } @@ -261,7 +251,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "invalid target block", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 8 } @@ -270,8 +259,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { } t.ethClock.EXPECT().GetEpochAtSlot(mockSlot).Return(mockEpoch).Times(1) t.ethClock.EXPECT().GetCurrentSlot().Return(mockSlot).Times(1) - 
t.beaconStateReader.EXPECT().ValidatorPublicKey(gomock.Any()).Return(common.Bytes48{}, nil).Times(1) - t.beaconStateReader.EXPECT().GetDomain(t.beaconConfig.DomainBeaconAttester, att.Data.Target.Epoch).Return([]byte{}, nil).Times(1) computeSigningRoot = func(obj ssz.HashableSSZ, domain []byte) ([32]byte, error) { return [32]byte{}, nil } @@ -289,7 +276,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "invalid finality checkpoint", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 8 } @@ -298,8 +284,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { } t.ethClock.EXPECT().GetEpochAtSlot(mockSlot).Return(mockEpoch).Times(1) t.ethClock.EXPECT().GetCurrentSlot().Return(mockSlot).Times(1) - t.beaconStateReader.EXPECT().ValidatorPublicKey(gomock.Any()).Return(common.Bytes48{}, nil).Times(1) - t.beaconStateReader.EXPECT().GetDomain(t.beaconConfig.DomainBeaconAttester, att.Data.Target.Epoch).Return([]byte{}, nil).Times(1) computeSigningRoot = func(obj ssz.HashableSSZ, domain []byte) ([32]byte, error) { return [32]byte{}, nil } @@ -323,7 +307,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { { name: "success", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader, synced_data.EmptyCancel).Times(1) computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 8 } @@ -332,8 +315,6 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { } t.ethClock.EXPECT().GetEpochAtSlot(mockSlot).Return(mockEpoch).Times(1) t.ethClock.EXPECT().GetCurrentSlot().Return(mockSlot).Times(1) - t.beaconStateReader.EXPECT().ValidatorPublicKey(gomock.Any()).Return(common.Bytes48{}, nil).Times(1) - t.beaconStateReader.EXPECT().GetDomain(t.beaconConfig.DomainBeaconAttester, att.Data.Target.Epoch).Return([]byte{}, nil).Times(1) computeSigningRoot = func(obj ssz.HashableSSZ, domain []byte) ([32]byte, error) { return [32]byte{}, nil } diff --git a/cl/phase1/network/services/blob_sidecar_service.go b/cl/phase1/network/services/blob_sidecar_service.go index b90897b64d6..a1c0f172478 100644 --- a/cl/phase1/network/services/blob_sidecar_service.go +++ b/cl/phase1/network/services/blob_sidecar_service.go @@ -26,14 +26,15 @@ import ( "github.com/Giulio2002/bls" gokzg4844 "github.com/crate-crypto/go-kzg-4844" + "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/crypto/kzg" - "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/fork" "github.com/erigontech/erigon/cl/monitor" + "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/phase1/forkchoice" "github.com/erigontech/erigon/cl/utils" "github.com/erigontech/erigon/cl/utils/eth_clock" @@ -73,7 +74,7 @@ func NewBlobSidecarService( ethClock: ethClock, emitters: emitters, } - go b.loop(ctx) + // go b.loop(ctx) return b } @@ -159,26 +160,31 @@ func (b *blobSidecarService) verifySidecarsSignature(header *cltypes.SignedBeaco currentVersion := b.beaconCfg.GetCurrentStateVersion(parentHeader.Slot / b.beaconCfg.SlotsPerEpoch) forkVersion := b.beaconCfg.GetForkVersionByVersion(currentVersion) + var ( + domain []byte + pk 
common.Bytes48 + err error + ) // Load head state - headState, cn := b.syncedDataManager.HeadState() - defer cn() - if headState == nil { - return ErrIgnore - } - domain, err := fork.ComputeDomain(b.beaconCfg.DomainBeaconProposer[:], utils.Uint32ToBytes4(forkVersion), headState.GenesisValidatorsRoot()) - if err != nil { - return err - } - sigRoot, err := fork.ComputeSigningRoot(header.Header, domain) - if err != nil { + if err := b.syncedDataManager.ViewHeadState(func(headState *state.CachingBeaconState) error { + domain, err = fork.ComputeDomain(b.beaconCfg.DomainBeaconProposer[:], utils.Uint32ToBytes4(forkVersion), headState.GenesisValidatorsRoot()) + if err != nil { + return err + } + + pk, err = headState.ValidatorPublicKey(int(header.Header.ProposerIndex)) + if err != nil { + return err + } + return nil + }); err != nil { return err } - pk, err := headState.ValidatorPublicKey(int(header.Header.ProposerIndex)) + sigRoot, err := fork.ComputeSigningRoot(header.Header, domain) if err != nil { return err } - cn() if ok, err = bls.Verify(header.Signature[:], sigRoot[:], pk[:]); err != nil { return err @@ -201,43 +207,43 @@ func (b *blobSidecarService) scheduleBlobSidecarForLaterExecution(blobSidecar *c b.blobSidecarsScheduledForLaterExecution.Store(blobSidecarHash, blobSidecarJob) } -// loop is the main loop of the block service -func (b *blobSidecarService) loop(ctx context.Context) { - ticker := time.NewTicker(blobJobsIntervalTick) - defer ticker.Stop() - if b.test { - return - } - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - } - - b.blobSidecarsScheduledForLaterExecution.Range(func(key, value any) bool { - job := value.(*blobSidecarJob) - // check if it has expired - if time.Since(job.creationTime) > blobJobExpiry { - b.blobSidecarsScheduledForLaterExecution.Delete(key.([32]byte)) - return true - } - blockRoot, err := job.blobSidecar.SignedBlockHeader.Header.HashSSZ() - if err != nil { - log.Debug("blob sidecar verification failed", "err", err) - return true - } - if _, has := b.forkchoiceStore.GetHeader(blockRoot); has { - b.blobSidecarsScheduledForLaterExecution.Delete(key.([32]byte)) - return true - } - if err := b.verifyAndStoreBlobSidecar(job.blobSidecar); err != nil { - log.Trace("blob sidecar verification failed", "err", err, - "slot", job.blobSidecar.SignedBlockHeader.Header.Slot) - return true - } - b.blobSidecarsScheduledForLaterExecution.Delete(key.([32]byte)) - return true - }) - } -} +// // loop is the main loop of the block service +// func (b *blobSidecarService) loop(ctx context.Context) { +// ticker := time.NewTicker(blobJobsIntervalTick) +// defer ticker.Stop() +// if b.test { +// return +// } +// for { +// select { +// case <-ctx.Done(): +// return +// case <-ticker.C: +// } + +// b.blobSidecarsScheduledForLaterExecution.Range(func(key, value any) bool { +// job := value.(*blobSidecarJob) +// // check if it has expired +// if time.Since(job.creationTime) > blobJobExpiry { +// b.blobSidecarsScheduledForLaterExecution.Delete(key.([32]byte)) +// return true +// } +// blockRoot, err := job.blobSidecar.SignedBlockHeader.Header.HashSSZ() +// if err != nil { +// log.Debug("blob sidecar verification failed", "err", err) +// return true +// } +// if _, has := b.forkchoiceStore.GetHeader(blockRoot); has { +// b.blobSidecarsScheduledForLaterExecution.Delete(key.([32]byte)) +// return true +// } +// if err := b.verifyAndStoreBlobSidecar(job.blobSidecar); err != nil { +// log.Trace("blob sidecar verification failed", "err", err, +// "slot", 
job.blobSidecar.SignedBlockHeader.Header.Slot) +// return true +// } +// b.blobSidecarsScheduledForLaterExecution.Delete(key.([32]byte)) +// return true +// }) +// } +// } diff --git a/cl/phase1/network/services/blob_sidecar_service_test.go b/cl/phase1/network/services/blob_sidecar_service_test.go index 628159595c7..da227994680 100644 --- a/cl/phase1/network/services/blob_sidecar_service_test.go +++ b/cl/phase1/network/services/blob_sidecar_service_test.go @@ -73,7 +73,7 @@ func setupBlobSidecarService(t *testing.T, ctrl *gomock.Controller, test bool) ( ctx2, cn := context.WithTimeout(ctx, 1) cn() cfg := &clparams.MainnetBeaconConfig - syncedDataManager := synced_data.NewSyncedDataManager(true, cfg) + syncedDataManager := synced_data.NewSyncedDataManager(cfg, true, 0) ethClock := eth_clock.NewMockEthereumClock(ctrl) forkchoiceMock := mock_services.NewForkChoiceStorageMock(t) emitters := beaconevents.NewEventEmitter() diff --git a/cl/phase1/network/services/block_service.go b/cl/phase1/network/services/block_service.go index d069e0e137e..060f0794c1e 100644 --- a/cl/phase1/network/services/block_service.go +++ b/cl/phase1/network/services/block_service.go @@ -31,6 +31,7 @@ import ( "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/persistence/beacon_indicies" + "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/phase1/core/state/lru" "github.com/erigontech/erigon/cl/phase1/forkchoice" "github.com/erigontech/erigon/cl/transition/impl/eth2" @@ -120,23 +121,30 @@ func (b *blockService) ProcessMessage(ctx context.Context, _ *uint64, msg *cltyp return ErrIgnore } - headState, cn := b.syncedData.HeadState() - defer cn() - if headState == nil { - b.scheduleBlockForLaterProcessing(msg) - return ErrIgnore - } - - // [IGNORE] The block is from a slot greater than the latest finalized slot -- i.e. validate that signed_beacon_block.message.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - // (a client MAY choose to validate and store such blocks for additional purposes -- e.g. slashing detection, archive nodes, etc). - if blockEpoch <= headState.FinalizedCheckpoint().Epoch { - return ErrIgnore - } + // headState, cn := b.syncedData.HeadState() + // defer cn() + // if headState == nil { + // b.scheduleBlockForLaterProcessing(msg) + // return ErrIgnore + // } + if err := b.syncedData.ViewHeadState(func(headState *state.CachingBeaconState) error { + // [IGNORE] The block is from a slot greater than the latest finalized slot -- i.e. validate that signed_beacon_block.message.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + // (a client MAY choose to validate and store such blocks for additional purposes -- e.g. slashing detection, archive nodes, etc). + if blockEpoch <= headState.FinalizedCheckpoint().Epoch { + return ErrIgnore + } - if ok, err := eth2.VerifyBlockSignature(headState, msg); err != nil { + if ok, err := eth2.VerifyBlockSignature(headState, msg); err != nil { + return err + } else if !ok { + return ErrInvalidSignature + } + return nil + }); err != nil { + if errors.Is(err, ErrIgnore) { + b.scheduleBlockForLaterProcessing(msg) + } return err - } else if !ok { - return ErrInvalidSignature } // [IGNORE] The block's parent (defined by block.parent_root) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). 
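The hunk above, like most of the service diffs in this patch, swaps the old HeadState()/HeadStateReader() call-plus-cancel pattern for a callback-style ViewHeadState on the synced-data manager. As a reading aid, a minimal, self-contained Go sketch of that access pattern follows; the package, Manager and State names are placeholders standing in for the real SyncedDataManager and *state.CachingBeaconState, and only ErrNotSynced, OnHeadState, UnsetHeadState and ViewHeadState mirror identifiers visible in the diffs.

package syncedview // placeholder package name, for illustration only

import (
	"errors"
	"sync"
)

// ErrNotSynced mirrors the sentinel the services check for (synced_data.ErrNotSynced in the patch).
var ErrNotSynced = errors.New("not synced")

// State stands in for *state.CachingBeaconState; any type works for the sketch.
type State struct {
	Slot uint64
}

// Manager is a minimal stand-in for the synced-data manager: it owns the head
// state and only hands it out for the duration of a callback, under a read lock.
type Manager struct {
	mu   sync.RWMutex
	head *State
}

// OnHeadState installs a new head state (write lock).
func (m *Manager) OnHeadState(s *State) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.head = s
}

// UnsetHeadState drops the head state so that readers get ErrNotSynced.
func (m *Manager) UnsetHeadState() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.head = nil
}

// ViewHeadState runs fn while holding a read lock on the head state.
// The caller never sees the state outside the callback, so it cannot forget
// to release it the way the old HeadState()/cancel pair allowed.
func (m *Manager) ViewHeadState(fn func(*State) error) error {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if m.head == nil {
		return ErrNotSynced
	}
	return fn(m.head)
}

// example shows the call shape the services adopt in these diffs.
func example(m *Manager) error {
	return m.ViewHeadState(func(hs *State) error {
		_ = hs.Slot // validations that need the head state go here
		return nil
	})
}

The point of the closure is that the state is only reachable while the manager's lock is held, which removes the leaked-cancel failure mode of the earlier pattern and lets callers translate ErrNotSynced into their own ignore/retry behaviour, as the hunks above do.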
@@ -153,7 +161,7 @@ func (b *blockService) ProcessMessage(ctx context.Context, _ *uint64, msg *cltyp if msg.Block.Body.BlobKzgCommitments.Len() > int(b.beaconCfg.MaxBlobsPerBlock) { return ErrInvalidCommitmentsCount } - cn() + b.publishBlockGossipEvent(msg) // the rest of the validation is done in the forkchoice store if err := b.processAndStoreBlock(ctx, msg); err != nil { diff --git a/cl/phase1/network/services/block_service_test.go b/cl/phase1/network/services/block_service_test.go index 772e8a85a57..5c49dde3f5f 100644 --- a/cl/phase1/network/services/block_service_test.go +++ b/cl/phase1/network/services/block_service_test.go @@ -36,7 +36,7 @@ import ( func setupBlockService(t *testing.T, ctrl *gomock.Controller) (BlockService, *synced_data.SyncedDataManager, *eth_clock.MockEthereumClock, *mock_services.ForkChoiceStorageMock) { db := memdb.NewTestDB(t) cfg := &clparams.MainnetBeaconConfig - syncedDataManager := synced_data.NewSyncedDataManager(true, cfg) + syncedDataManager := synced_data.NewSyncedDataManager(cfg, true, 0) ethClock := eth_clock.NewMockEthereumClock(ctrl) forkchoiceMock := mock_services.NewForkChoiceStorageMock(t) blockService := NewBlockService(context.Background(), db, forkchoiceMock, syncedDataManager, ethClock, cfg, nil) diff --git a/cl/phase1/network/services/bls_to_execution_change_service.go b/cl/phase1/network/services/bls_to_execution_change_service.go index c7c59a1b59f..2a9a8cfe347 100644 --- a/cl/phase1/network/services/bls_to_execution_change_service.go +++ b/cl/phase1/network/services/bls_to_execution_change_service.go @@ -23,11 +23,13 @@ import ( "fmt" "github.com/Giulio2002/bls" + "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/fork" + "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/pool" "github.com/erigontech/erigon/cl/utils" ) @@ -68,23 +70,29 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet return ErrIgnore } change := msg.SignedBLSToExecutionChange.Message - stateReader, cn := s.syncedDataManager.HeadStateReader() - defer cn() - if stateReader == nil { - return ErrIgnore - } - // [IGNORE] current_epoch >= CAPELLA_FORK_EPOCH, where current_epoch is defined by the current wall-clock time. - if stateReader.Version() < clparams.CapellaVersion { - return ErrIgnore - } - // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-process_bls_to_execution_change - // assert address_change.validator_index < len(state.validators) - validator, err := stateReader.ValidatorForValidatorIndex(int(change.ValidatorIndex)) - if err != nil { - return fmt.Errorf("unable to retrieve validator: %v", err) + var ( + wc, genesisValidatorRoot common.Hash + ) + if err := s.syncedDataManager.ViewHeadState(func(stateReader *state.CachingBeaconState) error { + // [IGNORE] current_epoch >= CAPELLA_FORK_EPOCH, where current_epoch is defined by the current wall-clock time. 
+ if stateReader.Version() < clparams.CapellaVersion { + return ErrIgnore + } + // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-process_bls_to_execution_change + // assert address_change.validator_index < len(state.validators) + validator, err := stateReader.ValidatorForValidatorIndex(int(change.ValidatorIndex)) + if err != nil { + return fmt.Errorf("unable to retrieve validator: %v", err) + } + wc = validator.WithdrawalCredentials() + + // assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) + genesisValidatorRoot = stateReader.GenesisValidatorsRoot() + return nil + }); err != nil { + return err } - wc := validator.WithdrawalCredentials() // assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX if wc[0] != byte(s.beaconCfg.BLSWithdrawalPrefixByte) { @@ -99,10 +107,6 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet return errors.New("invalid withdrawal credentials hash") } - // assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) - genesisValidatorRoot := stateReader.GenesisValidatorsRoot() - cn() - domain, err := fork.ComputeDomain(s.beaconCfg.DomainBLSToExecutionChange[:], utils.Uint32ToBytes4(uint32(s.beaconCfg.GenesisForkVersion)), genesisValidatorRoot) if err != nil { return err diff --git a/cl/phase1/network/services/bls_to_execution_change_service_test.go b/cl/phase1/network/services/bls_to_execution_change_service_test.go index c0da2aec778..276662e9817 100644 --- a/cl/phase1/network/services/bls_to_execution_change_service_test.go +++ b/cl/phase1/network/services/bls_to_execution_change_service_test.go @@ -27,14 +27,13 @@ import ( "github.com/erigontech/erigon-lib/common" mockState "github.com/erigontech/erigon/cl/abstract/mock_services" + "github.com/erigontech/erigon/cl/antiquary/tests" "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" - mockSync "github.com/erigontech/erigon/cl/beacon/synced_data/mock_services" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/pool" - "github.com/erigontech/erigon/cl/utils" "github.com/stretchr/testify/suite" "go.uber.org/mock/gomock" ) @@ -44,7 +43,7 @@ type blsToExecutionChangeTestSuite struct { gomockCtrl *gomock.Controller operationsPool *pool.OperationsPool emitters *beaconevents.EventEmitter - syncedData *mockSync.MockSyncedData + syncedData synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig service BLSToExecutionChangeService @@ -56,11 +55,14 @@ func (t *blsToExecutionChangeTestSuite) SetupTest() { t.operationsPool = &pool.OperationsPool{ BLSToExecutionChangesPool: pool.NewOperationPool[common.Bytes96, *cltypes.SignedBLSToExecutionChange](10, "blsToExecutionChangesPool"), } - t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) + _, st, _ := tests.GetCapellaRandom() + t.syncedData = synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true, 0) + t.syncedData.OnHeadState(st) t.emitters = beaconevents.NewEventEmitter() t.beaconCfg = &clparams.BeaconChainConfig{} batchSignatureVerifier := NewBatchSignatureVerifier(context.TODO(), nil) go batchSignatureVerifier.Start() + t.service = NewBLSToExecutionChangeService(*t.operationsPool, t.emitters, t.syncedData, t.beaconCfg, batchSignatureVerifier) // mock global functions t.mockFuncs = &mockFuncs{ @@ -107,9 +109,8 @@ func 
(t *blsToExecutionChangeTestSuite) TestProcessMessage() { { name: "version is less than CapellaVersion", mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion - 1).AnyTimes() - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader, synced_data.EmptyCancel).AnyTimes() + _, st, _ := tests.GetBellatrixRandom() + t.syncedData.OnHeadState(st) }, msg: mockMsg, wantErr: true, @@ -121,7 +122,6 @@ func (t *blsToExecutionChangeTestSuite) TestProcessMessage() { mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).AnyTimes() mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.SignedBLSToExecutionChange.Message.ValidatorIndex)).Return(nil, errors.New("not found")).AnyTimes() - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader, synced_data.EmptyCancel).AnyTimes() }, msg: mockMsg, wantErr: true, @@ -134,7 +134,6 @@ func (t *blsToExecutionChangeTestSuite) TestProcessMessage() { mockValidator.SetWithdrawalCredentials([32]byte{1, 1, 1}) // should be equal to BLS_WITHDRAWAL_PREFIX mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).AnyTimes() mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.SignedBLSToExecutionChange.Message.ValidatorIndex)).Return(mockValidator, nil).AnyTimes() - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader, synced_data.EmptyCancel).AnyTimes() }, msg: mockMsg, wantErr: true, @@ -147,57 +146,54 @@ func (t *blsToExecutionChangeTestSuite) TestProcessMessage() { mockValidator.SetWithdrawalCredentials([32]byte{0}) // first byte is equal to BLS_WITHDRAWAL_PREFIX mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).AnyTimes() mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.SignedBLSToExecutionChange.Message.ValidatorIndex)).Return(mockValidator, nil).AnyTimes() - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader, synced_data.EmptyCancel).AnyTimes() }, msg: mockMsg, wantErr: true, }, - { - name: "invalid bls signature", - mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidator() - hashedFrom := utils.Sha256(mockMsg.SignedBLSToExecutionChange.Message.From[:]) - wc := [32]byte{0} - copy(wc[1:], hashedFrom[1:]) - mockValidator.SetWithdrawalCredentials(wc) - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).AnyTimes() - mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.SignedBLSToExecutionChange.Message.ValidatorIndex)).Return(mockValidator, nil).AnyTimes() - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader, synced_data.EmptyCancel).AnyTimes() - mockStateReader.EXPECT().GenesisValidatorsRoot().Return([32]byte{}).AnyTimes() - // bls verify - t.gomockCtrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.SignedBLSToExecutionChange.Message, gomock.Any()).Return([32]byte{}, nil).AnyTimes() - t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerifyMultipleSignatures", gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil).Times(2) - }, - msg: mockMsg, - specificErr: ErrInvalidBlsSignature, - wantErr: true, - }, - { - name: "pass", - mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidator() - hashedFrom := utils.Sha256(mockMsg.SignedBLSToExecutionChange.Message.From[:]) - wc := [32]byte{0} - copy(wc[1:], hashedFrom[1:]) - 
mockValidator.SetWithdrawalCredentials(wc) - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).AnyTimes() - mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.SignedBLSToExecutionChange.Message.ValidatorIndex)).Return(mockValidator, nil).AnyTimes() - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader, synced_data.EmptyCancel).AnyTimes() - mockStateReader.EXPECT().GenesisValidatorsRoot().Return([32]byte{}).AnyTimes() - // bls verify - t.gomockCtrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.SignedBLSToExecutionChange.Message, gomock.Any()).Return([32]byte{}, nil).AnyTimes() - // update withdrawal credentials - mockNewWc := common.Hash{byte(t.beaconCfg.ETH1AddressWithdrawalPrefixByte)} - copy(mockNewWc[1:], make([]byte, 11)) - copy(mockNewWc[12:], mockMsg.SignedBLSToExecutionChange.Message.To[:]) - t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerifyMultipleSignatures", gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes() - }, - msg: mockMsg, - // specificErr: ErrInvalidBlsSignature, - // wantErr: true, - }, + // { + // name: "invalid bls signature", + // mock: func() { + // mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) + // mockValidator := solid.NewValidator() + // hashedFrom := utils.Sha256(mockMsg.SignedBLSToExecutionChange.Message.From[:]) + // wc := [32]byte{0} + // copy(wc[1:], hashedFrom[1:]) + // mockValidator.SetWithdrawalCredentials(wc) + // mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).AnyTimes() + // mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.SignedBLSToExecutionChange.Message.ValidatorIndex)).Return(mockValidator, nil).AnyTimes() + // mockStateReader.EXPECT().GenesisValidatorsRoot().Return([32]byte{}).AnyTimes() + // // bls verify + // t.gomockCtrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.SignedBLSToExecutionChange.Message, gomock.Any()).Return([32]byte{}, nil).AnyTimes() + // t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerifyMultipleSignatures", gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil).Times(2) + // }, + // msg: mockMsg, + // specificErr: ErrInvalidBlsSignature, + // wantErr: true, + // }, + // { + // name: "pass", + // mock: func() { + // mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) + // mockValidator := solid.NewValidator() + // hashedFrom := utils.Sha256(mockMsg.SignedBLSToExecutionChange.Message.From[:]) + // wc := [32]byte{0} + // copy(wc[1:], hashedFrom[1:]) + // mockValidator.SetWithdrawalCredentials(wc) + // mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).AnyTimes() + // mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.SignedBLSToExecutionChange.Message.ValidatorIndex)).Return(mockValidator, nil).AnyTimes() + // mockStateReader.EXPECT().GenesisValidatorsRoot().Return([32]byte{}).AnyTimes() + // // bls verify + // t.gomockCtrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.SignedBLSToExecutionChange.Message, gomock.Any()).Return([32]byte{}, nil).AnyTimes() + // // update withdrawal credentials + // mockNewWc := common.Hash{byte(t.beaconCfg.ETH1AddressWithdrawalPrefixByte)} + // copy(mockNewWc[1:], make([]byte, 11)) + // copy(mockNewWc[12:], mockMsg.SignedBLSToExecutionChange.Message.To[:]) + // t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerifyMultipleSignatures", gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes() + // }, + // msg: mockMsg, + // // specificErr: ErrInvalidBlsSignature, + // // wantErr: true, + // }, } for _, tt := 
range tests { diff --git a/cl/phase1/network/services/proposer_slashing_service.go b/cl/phase1/network/services/proposer_slashing_service.go index d3a6adc0dd4..876ccc8c806 100644 --- a/cl/phase1/network/services/proposer_slashing_service.go +++ b/cl/phase1/network/services/proposer_slashing_service.go @@ -91,42 +91,38 @@ func (s *proposerSlashingService) ProcessMessage(ctx context.Context, subnet *ui return errors.New("proposee slashing headers are the same") } - // Verify the proposer is slashable - state, cn := s.syncedDataManager.HeadStateReader() - defer cn() - if state == nil { - return ErrIgnore - } - proposer, err := state.ValidatorForValidatorIndex(int(h1.ProposerIndex)) - if err != nil { - return fmt.Errorf("unable to retrieve state: %v", err) - } - if !proposer.IsSlashable(s.ethClock.GetCurrentEpoch()) { - return fmt.Errorf("proposer is not slashable: %v", proposer) - } - - // Verify signatures for both headers - for _, signedHeader := range []*cltypes.SignedBeaconBlockHeader{msg.Header1, msg.Header2} { - domain, err := state.GetDomain(s.beaconCfg.DomainBeaconProposer, st.GetEpochAtSlot(s.beaconCfg, signedHeader.Header.Slot)) + return s.syncedDataManager.ViewHeadState(func(state *st.CachingBeaconState) error { + proposer, err := state.ValidatorForValidatorIndex(int(h1.ProposerIndex)) if err != nil { - return fmt.Errorf("unable to get domain: %v", err) + return fmt.Errorf("unable to retrieve state: %v", err) } - pk := proposer.PublicKey() - signingRoot, err := computeSigningRoot(signedHeader, domain) - if err != nil { - return fmt.Errorf("unable to compute signing root: %v", err) - } - valid, err := blsVerify(signedHeader.Signature[:], signingRoot[:], pk[:]) - if err != nil { - return fmt.Errorf("unable to verify signature: %v", err) + if !proposer.IsSlashable(s.ethClock.GetCurrentEpoch()) { + return fmt.Errorf("proposer is not slashable: %v", proposer) } - if !valid { - return fmt.Errorf("invalid signature: signature %v, root %v, pubkey %v", signedHeader.Signature[:], signingRoot[:], pk) + + // Verify signatures for both headers + for _, signedHeader := range []*cltypes.SignedBeaconBlockHeader{msg.Header1, msg.Header2} { + domain, err := state.GetDomain(s.beaconCfg.DomainBeaconProposer, st.GetEpochAtSlot(s.beaconCfg, signedHeader.Header.Slot)) + if err != nil { + return fmt.Errorf("unable to get domain: %v", err) + } + pk := proposer.PublicKey() + signingRoot, err := computeSigningRoot(signedHeader, domain) + if err != nil { + return fmt.Errorf("unable to compute signing root: %v", err) + } + valid, err := blsVerify(signedHeader.Signature[:], signingRoot[:], pk[:]) + if err != nil { + return fmt.Errorf("unable to verify signature: %v", err) + } + if !valid { + return fmt.Errorf("invalid signature: signature %v, root %v, pubkey %v", signedHeader.Signature[:], signingRoot[:], pk) + } } - } - s.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(msg), msg) - s.cache.Add(pIndex, struct{}{}) - s.emitters.Operation().SendProposerSlashing(msg) - return nil + s.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(msg), msg) + s.cache.Add(pIndex, struct{}{}) + s.emitters.Operation().SendProposerSlashing(msg) + return nil + }) } diff --git a/cl/phase1/network/services/proposer_slashing_service_test.go b/cl/phase1/network/services/proposer_slashing_service_test.go index 90b52ee08f7..7092ca27bf0 100644 --- a/cl/phase1/network/services/proposer_slashing_service_test.go +++ b/cl/phase1/network/services/proposer_slashing_service_test.go @@ 
-18,15 +18,13 @@ package services import ( "context" - "errors" "log" "testing" "github.com/erigontech/erigon-lib/common" - mockState "github.com/erigontech/erigon/cl/abstract/mock_services" + "github.com/erigontech/erigon/cl/antiquary/tests" "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" - mockSync "github.com/erigontech/erigon/cl/beacon/synced_data/mock_services" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" @@ -40,7 +38,7 @@ type proposerSlashingTestSuite struct { suite.Suite gomockCtrl *gomock.Controller operationsPool *pool.OperationsPool - syncedData *mockSync.MockSyncedData + syncedData synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig ethClock *eth_clock.MockEthereumClock proposerSlashingService *proposerSlashingService @@ -52,7 +50,9 @@ func (t *proposerSlashingTestSuite) SetupTest() { t.operationsPool = &pool.OperationsPool{ ProposerSlashingsPool: pool.NewOperationPool[common.Bytes96, *cltypes.ProposerSlashing](10, "proposerSlashingsPool"), } - t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) + _, st, _ := tests.GetBellatrixRandom() + t.syncedData = synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true, 0) + t.syncedData.OnHeadState(st) t.ethClock = eth_clock.NewMockEthereumClock(t.gomockCtrl) t.beaconCfg = &clparams.BeaconChainConfig{ SlotsPerEpoch: 2, @@ -89,6 +89,24 @@ func (t *proposerSlashingTestSuite) TestProcessMessage() { Signature: common.Bytes96{4, 5, 6}, }, } + mockMsg2 := &cltypes.ProposerSlashing{ + Header1: &cltypes.SignedBeaconBlockHeader{ + Header: &cltypes.BeaconBlockHeader{ + Slot: 1, + ProposerIndex: 9191991, + Root: common.Hash{1}, + }, + Signature: common.Bytes96{1, 2, 3}, + }, + Header2: &cltypes.SignedBeaconBlockHeader{ + Header: &cltypes.BeaconBlockHeader{ + Slot: 1, + ProposerIndex: 9191991, + Root: common.Hash{2}, + }, + Signature: common.Bytes96{4, 5, 6}, + }, + } tests := []struct { name string mock func() @@ -155,26 +173,24 @@ func (t *proposerSlashingTestSuite) TestProcessMessage() { { name: "empty head state", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(nil, synced_data.EmptyCancel).Times(1) + t.syncedData.UnsetHeadState() }, msg: mockMsg, wantErr: true, - err: ErrIgnore, + err: synced_data.ErrNotSynced, }, { name: "validator not found", mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(nil, errors.New("not found")).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState, synced_data.EmptyCancel).Times(1) + _, st, _ := tests.GetBellatrixRandom() + t.syncedData.OnHeadState(st) }, - msg: mockMsg, + msg: mockMsg2, wantErr: true, }, { name: "proposer is not slashable", mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) mockValidator := solid.NewValidatorFromParameters( [48]byte{}, [32]byte{}, @@ -185,8 +201,10 @@ func (t *proposerSlashingTestSuite) TestProcessMessage() { 0, 0, ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState, synced_data.EmptyCancel).Times(1) + _, st, _ := tests.GetBellatrixRandom() + st.ValidatorSet().Set(int(mockProposerIndex), mockValidator) + t.syncedData.OnHeadState(st) + t.ethClock.EXPECT().GetCurrentEpoch().Return(uint64(1)).Times(1) }, msg: mockMsg, @@ -195,24 +213,21 
@@ func (t *proposerSlashingTestSuite) TestProcessMessage() { { name: "pass", mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidatorFromParameters( - [48]byte{}, - [32]byte{}, - 0, - false, - 0, - 0, - 2, - 2, - ) - t.syncedData.EXPECT().HeadStateReader().Return(mockState, synced_data.EmptyCancel).Times(1) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(mockValidator, nil).Times(1) + // mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + // mockValidator := solid.NewValidatorFromParameters( + // [48]byte{}, + // [32]byte{}, + // 0, + // false, + // 0, + // 0, + // 2, + // 2, + // ) t.ethClock.EXPECT().GetCurrentEpoch().Return(uint64(1)).Times(1) - mockState.EXPECT().GetDomain(t.beaconCfg.DomainBeaconProposer, gomock.Any()).Return([]byte{}, nil).Times(2) - t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Header1, []byte{}).Return([32]byte{}, nil).Times(1) - t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Header2, []byte{}).Return([32]byte{}, nil).Times(1) + t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Header1, gomock.Any()).Return([32]byte{}, nil).Times(1) + t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Header2, gomock.Any()).Return([32]byte{}, nil).Times(1) t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "BlsVerify", gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(2) }, msg: mockMsg, diff --git a/cl/phase1/network/services/sync_committee_messages_service.go b/cl/phase1/network/services/sync_committee_messages_service.go index 22320d2bc56..3355fdcae47 100644 --- a/cl/phase1/network/services/sync_committee_messages_service.go +++ b/cl/phase1/network/services/sync_committee_messages_service.go @@ -72,47 +72,42 @@ func NewSyncCommitteeMessagesService( // ProcessMessage processes a sync committee message func (s *syncCommitteeMessagesService) ProcessMessage(ctx context.Context, subnet *uint64, msg *cltypes.SyncCommitteeMessage) error { - //return ErrIgnore - s.mu.Lock() defer s.mu.Unlock() - headState, cn := s.syncedDataManager.HeadState() - defer cn() - if headState == nil { - return ErrIgnore - } - // [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot. - if !s.ethClock.IsSlotCurrentSlotWithMaximumClockDisparity(msg.Slot) { - return ErrIgnore - } - // [REJECT] The subnet_id is valid for the given validator, i.e. subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index). - // Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee. - subnets, err := subnets.ComputeSubnetsForSyncCommittee(headState, msg.ValidatorIndex) - if err != nil { - return err - } - seenSyncCommitteeMessageIdentifier := seenSyncCommitteeMessage{ - subnet: *subnet, - slot: msg.Slot, - validatorIndex: msg.ValidatorIndex, - } + return s.syncedDataManager.ViewHeadState(func(headState *state.CachingBeaconState) error { + // [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot. + if !s.ethClock.IsSlotCurrentSlotWithMaximumClockDisparity(msg.Slot) { + return ErrIgnore + } + // [REJECT] The subnet_id is valid for the given validator, i.e. 
subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index). + // Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee. + subnets, err := subnets.ComputeSubnetsForSyncCommittee(headState, msg.ValidatorIndex) + if err != nil { + return err + } + seenSyncCommitteeMessageIdentifier := seenSyncCommitteeMessage{ + subnet: *subnet, + slot: msg.Slot, + validatorIndex: msg.ValidatorIndex, + } - if !slices.Contains(subnets, *subnet) { - return fmt.Errorf("validator is not into any subnet %d", *subnet) - } - // [IGNORE] There has been no other valid sync committee message for the declared slot for the validator referenced by sync_committee_message.validator_index. - if _, ok := s.seenSyncCommitteeMessages[seenSyncCommitteeMessageIdentifier]; ok { - return ErrIgnore - } - // [REJECT] The signature is valid for the message beacon_block_root for the validator referenced by validator_index - if err := verifySyncCommitteeMessageSignature(headState, msg); !s.test && err != nil { - return err - } - s.seenSyncCommitteeMessages[seenSyncCommitteeMessageIdentifier] = struct{}{} - s.cleanupOldSyncCommitteeMessages() // cleanup old messages - // Aggregate the message - return s.syncContributionPool.AddSyncCommitteeMessage(headState, *subnet, msg) + if !slices.Contains(subnets, *subnet) { + return fmt.Errorf("validator is not into any subnet %d", *subnet) + } + // [IGNORE] There has been no other valid sync committee message for the declared slot for the validator referenced by sync_committee_message.validator_index. + if _, ok := s.seenSyncCommitteeMessages[seenSyncCommitteeMessageIdentifier]; ok { + return ErrIgnore + } + // [REJECT] The signature is valid for the message beacon_block_root for the validator referenced by validator_index + if err := verifySyncCommitteeMessageSignature(headState, msg); !s.test && err != nil { + return err + } + s.seenSyncCommitteeMessages[seenSyncCommitteeMessageIdentifier] = struct{}{} + s.cleanupOldSyncCommitteeMessages() // cleanup old messages + // Aggregate the message + return s.syncContributionPool.AddSyncCommitteeMessage(headState, *subnet, msg) + }) } // cleanupOldSyncCommitteeMessages removes old sync committee messages from the cache diff --git a/cl/phase1/network/services/sync_committee_messages_service_test.go b/cl/phase1/network/services/sync_committee_messages_service_test.go index c6a940d5035..ffea030bb51 100644 --- a/cl/phase1/network/services/sync_committee_messages_service_test.go +++ b/cl/phase1/network/services/sync_committee_messages_service_test.go @@ -33,7 +33,7 @@ import ( func setupSyncCommitteesServiceTest(t *testing.T, ctrl *gomock.Controller) (SyncCommitteeMessagesService, *synced_data.SyncedDataManager, *eth_clock.MockEthereumClock) { cfg := &clparams.MainnetBeaconConfig - syncedDataManager := synced_data.NewSyncedDataManager(true, cfg) + syncedDataManager := synced_data.NewSyncedDataManager(cfg, true, 0) ethClock := eth_clock.NewMockEthereumClock(ctrl) syncContributionPool := syncpoolmock.NewMockSyncContributionPool(ctrl) s := NewSyncCommitteeMessagesService(cfg, ethClock, syncedDataManager, syncContributionPool, true) diff --git a/cl/phase1/network/services/sync_contribution_service.go b/cl/phase1/network/services/sync_contribution_service.go index 3d8dbe4f951..154192de9d5 100644 --- a/cl/phase1/network/services/sync_contribution_service.go +++ b/cl/phase1/network/services/sync_contribution_service.go @@ -87,76 +87,74 @@ func (s 
*syncContributionService) ProcessMessage(ctx context.Context, subnet *ui selectionProof := contributionAndProof.SelectionProof aggregationBits := contributionAndProof.Contribution.AggregationBits - headState, cn := s.syncedDataManager.HeadState() - defer cn() - if headState == nil { - return ErrIgnore - } + return s.syncedDataManager.ViewHeadState(func(headState *state.CachingBeaconState) error { - // [REJECT] The subcommittee index is in the allowed range, i.e. contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT. - if contributionAndProof.Contribution.SubcommitteeIndex >= clparams.MainnetBeaconConfig.SyncCommitteeSubnetCount { - return errors.New("subcommittee index is out of range") - } + // [IGNORE] The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. contribution.slot == current_slot. + if !s.ethClock.IsSlotCurrentSlotWithMaximumClockDisparity(contributionAndProof.Contribution.Slot) { + return ErrIgnore + } - aggregatorPubKey, err := headState.ValidatorPublicKey(int(contributionAndProof.AggregatorIndex)) - if err != nil { - return err - } - subcommiteePubsKeys, err := s.getSyncSubcommitteePubkeys(headState, contributionAndProof.Contribution.SubcommitteeIndex) - if err != nil { - return err - } + // [REJECT] The contribution has participants -- that is, any(contribution.aggregation_bits). + if bytes.Equal(aggregationBits, make([]byte, len(aggregationBits))) { // check if the aggregation bits are all zeros + return errors.New("contribution has no participants") + } - // [IGNORE] The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. contribution.slot == current_slot. - if !s.ethClock.IsSlotCurrentSlotWithMaximumClockDisparity(contributionAndProof.Contribution.Slot) { - return ErrIgnore - } + // [REJECT] The subcommittee index is in the allowed range, i.e. contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT. + if contributionAndProof.Contribution.SubcommitteeIndex >= clparams.MainnetBeaconConfig.SyncCommitteeSubnetCount { + return errors.New("subcommittee index is out of range") + } - // [REJECT] The contribution has participants -- that is, any(contribution.aggregation_bits). - if bytes.Equal(aggregationBits, make([]byte, len(aggregationBits))) { // check if the aggregation bits are all zeros - return errors.New("contribution has no participants") - } + aggregatorPubKey, err := headState.ValidatorPublicKey(int(contributionAndProof.AggregatorIndex)) + if err != nil { + return err + } + subcommiteePubsKeys, err := s.getSyncSubcommitteePubkeys(headState, contributionAndProof.Contribution.SubcommitteeIndex) + if err != nil { + return err + } - modulo := max(1, s.beaconCfg.SyncCommitteeSize/s.beaconCfg.SyncCommitteeSubnetCount/s.beaconCfg.TargetAggregatorsPerSyncSubcommittee) - hashSignature := utils.Sha256(selectionProof[:]) - if !s.test && binary.LittleEndian.Uint64(hashSignature[:8])%modulo != 0 { - return errors.New("selects the validator as an aggregator") - } + modulo := max(1, s.beaconCfg.SyncCommitteeSize/s.beaconCfg.SyncCommitteeSubnetCount/s.beaconCfg.TargetAggregatorsPerSyncSubcommittee) + hashSignature := utils.Sha256(selectionProof[:]) + if !s.test && binary.LittleEndian.Uint64(hashSignature[:8])%modulo != 0 { + return errors.New("selects the validator as an aggregator") + } - // [REJECT] The aggregator's validator index is in the declared subcommittee of the current sync committee -- i.e. 
state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index). - if !slices.Contains(subcommiteePubsKeys, aggregatorPubKey) { - return errors.New("aggregator's validator index is not in subcommittee") - } + // [REJECT] The aggregator's validator index is in the declared subcommittee of the current sync committee -- i.e. state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index). + if !slices.Contains(subcommiteePubsKeys, aggregatorPubKey) { + return errors.New("aggregator's validator index is not in subcommittee") + } - // [IGNORE] The sync committee contribution is the first valid contribution received for the aggregator with index contribution_and_proof.aggregator_index for the slot contribution.slot and subcommittee index contribution.subcommittee_index (this requires maintaining a cache of size SYNC_COMMITTEE_SIZE for this topic that can be flushed after each slot). - if s.wasContributionSeen(contributionAndProof) { - return ErrIgnore - } + // [IGNORE] The sync committee contribution is the first valid contribution received for the aggregator with index contribution_and_proof.aggregator_index for the slot contribution.slot and subcommittee index contribution.subcommittee_index (this requires maintaining a cache of size SYNC_COMMITTEE_SIZE for this topic that can be flushed after each slot). + if s.wasContributionSeen(contributionAndProof) { + return ErrIgnore + } - // [REJECT] The contribution_and_proof.selection_proof is a valid signature of the SyncAggregatorSelectionData derived from the contribution by the validator with index contribution_and_proof.aggregator_index. - if err := verifySyncContributionSelectionProof(headState, contributionAndProof); !s.test && err != nil { - return err - } - // [REJECT] The aggregator signature, signed_contribution_and_proof.signature, is valid. - if err := verifyAggregatorSignatureForSyncContribution(headState, signedContribution); !s.test && err != nil { - return err - } - // [REJECT] The aggregate signature is valid for the message beacon_block_root and aggregate pubkey derived - // from the participation info in aggregation_bits for the subcommittee specified by the contribution.subcommittee_index. - if err := verifySyncContributionProofAggregatedSignature(headState, contributionAndProof.Contribution, subcommiteePubsKeys); !s.test && err != nil { + // [REJECT] The contribution_and_proof.selection_proof is a valid signature of the SyncAggregatorSelectionData derived from the contribution by the validator with index contribution_and_proof.aggregator_index. + if err := verifySyncContributionSelectionProof(headState, contributionAndProof); !s.test && err != nil { + return err + } + // [REJECT] The aggregator signature, signed_contribution_and_proof.signature, is valid. + if err := verifyAggregatorSignatureForSyncContribution(headState, signedContribution); !s.test && err != nil { + return err + } + // [REJECT] The aggregate signature is valid for the message beacon_block_root and aggregate pubkey derived + // from the participation info in aggregation_bits for the subcommittee specified by the contribution.subcommittee_index. 
+ if err := verifySyncContributionProofAggregatedSignature(headState, contributionAndProof.Contribution, subcommiteePubsKeys); !s.test && err != nil { + return err + } + // mark the valid contribution as seen + s.markContributionAsSeen(contributionAndProof) + + // emit contribution_and_proof + s.emitters.Operation().SendContributionProof(signedContribution) + // add the contribution to the pool + err = s.syncContributionPool.AddSyncContribution(headState, contributionAndProof.Contribution) + if errors.Is(err, sync_contribution_pool.ErrIsSuperset) { + return ErrIgnore + } return err - } - // mark the valid contribution as seen - s.markContributionAsSeen(contributionAndProof) - - // emit contribution_and_proof - s.emitters.Operation().SendContributionProof(signedContribution) - // add the contribution to the pool - err = s.syncContributionPool.AddSyncContribution(headState, contributionAndProof.Contribution) - if errors.Is(err, sync_contribution_pool.ErrIsSuperset) { - return ErrIgnore - } - return err + }) + } // def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]: diff --git a/cl/phase1/network/services/sync_contribution_service_test.go b/cl/phase1/network/services/sync_contribution_service_test.go index 3b1ef16e96a..be472fbe7ca 100644 --- a/cl/phase1/network/services/sync_contribution_service_test.go +++ b/cl/phase1/network/services/sync_contribution_service_test.go @@ -35,7 +35,7 @@ import ( func setupSyncContributionServiceTest(t *testing.T, ctrl *gomock.Controller) (SyncContributionService, *synced_data.SyncedDataManager, *eth_clock.MockEthereumClock) { cfg := &clparams.MainnetBeaconConfig - syncedDataManager := synced_data.NewSyncedDataManager(true, cfg) + syncedDataManager := synced_data.NewSyncedDataManager(cfg, true, 0) ethClock := eth_clock.NewMockEthereumClock(ctrl) syncContributionPool := syncpoolmock.NewMockSyncContributionPool(ctrl) s := NewSyncContributionService(syncedDataManager, cfg, syncContributionPool, ethClock, beaconevents.NewEventEmitter(), true) diff --git a/cl/phase1/network/services/voluntary_exit_service.go b/cl/phase1/network/services/voluntary_exit_service.go index 71dc19b74fa..da23aa8b313 100644 --- a/cl/phase1/network/services/voluntary_exit_service.go +++ b/cl/phase1/network/services/voluntary_exit_service.go @@ -20,11 +20,13 @@ import ( "context" "fmt" + "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/fork" + "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/pool" "github.com/erigontech/erigon/cl/utils" "github.com/erigontech/erigon/cl/utils/eth_clock" @@ -67,57 +69,62 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 return ErrIgnore } + var ( + signingRoot common.Hash + pk common.Bytes48 + domain []byte + ) + // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#voluntary-exits // def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: - state, cn := s.syncedDataManager.HeadStateReader() - defer cn() - if state == nil { - return ErrIgnore - } - - val, err := state.ValidatorForValidatorIndex(int(voluntaryExit.ValidatorIndex)) - if err != nil { - return ErrIgnore - } - curEpoch := s.ethClock.GetCurrentEpoch() - - // Verify the validator is 
active - // assert is_active_validator(validator, get_current_epoch(state)) - if !val.Active(curEpoch) { - return errors.New("validator is not active") - } - - // Verify exit has not been initiated - // assert validator.exit_epoch == FAR_FUTURE_EPOCH - if !(val.ExitEpoch() == s.beaconCfg.FarFutureEpoch) { - return fmt.Errorf("verify exit has not been initiated. exitEpoch: %d, farFutureEpoch: %d", val.ExitEpoch(), s.beaconCfg.FarFutureEpoch) - } - - // Exits must specify an epoch when they become valid; they are not valid before then - // assert get_current_epoch(state) >= voluntary_exit.epoch - if !(curEpoch >= voluntaryExit.Epoch) { - return errors.New("exits must specify an epoch when they become valid; they are not valid before then") - } - - // Verify the validator has been active long enough - // assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD - if !(curEpoch >= val.ActivationEpoch()+s.beaconCfg.ShardCommitteePeriod) { - return errors.New("verify the validator has been active long enough") - } - - // Verify signature - // domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) - // signing_root = compute_signing_root(voluntary_exit, domain) - // assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) - pk := val.PublicKey() - domainType := s.beaconCfg.DomainVoluntaryExit - var domain []byte - if state.Version() < clparams.DenebVersion { - domain, err = state.GetDomain(domainType, voluntaryExit.Epoch) - } else if state.Version() >= clparams.DenebVersion { - domain, err = fork.ComputeDomain(domainType[:], utils.Uint32ToBytes4(uint32(s.beaconCfg.CapellaForkVersion)), state.GenesisValidatorsRoot()) - } - if err != nil { + if err := s.syncedDataManager.ViewHeadState(func(state *state.CachingBeaconState) error { + val, err := state.ValidatorForValidatorIndex(int(voluntaryExit.ValidatorIndex)) + if err != nil { + return ErrIgnore + } + curEpoch := s.ethClock.GetCurrentEpoch() + + // Verify the validator is active + // assert is_active_validator(validator, get_current_epoch(state)) + if !val.Active(curEpoch) { + return errors.New("validator is not active") + } + + // Verify exit has not been initiated + // assert validator.exit_epoch == FAR_FUTURE_EPOCH + if val.ExitEpoch() != s.beaconCfg.FarFutureEpoch { + return fmt.Errorf("verify exit has not been initiated. 
exitEpoch: %d, farFutureEpoch: %d", val.ExitEpoch(), s.beaconCfg.FarFutureEpoch) + } + + // Exits must specify an epoch when they become valid; they are not valid before then + // assert get_current_epoch(state) >= voluntary_exit.epoch + if curEpoch < voluntaryExit.Epoch { + return errors.New("exits must specify an epoch when they become valid; they are not valid before then") + } + + // Verify the validator has been active long enough + // assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD + if curEpoch < val.ActivationEpoch()+s.beaconCfg.ShardCommitteePeriod { + return errors.New("verify the validator has been active long enough") + } + + // Verify signature + // domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) + // signing_root = compute_signing_root(voluntary_exit, domain) + // assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) + pk = val.PublicKey() + domainType := s.beaconCfg.DomainVoluntaryExit + if state.Version() < clparams.DenebVersion { + domain, err = state.GetDomain(domainType, voluntaryExit.Epoch) + } else if state.Version() >= clparams.DenebVersion { + domain, err = fork.ComputeDomain(domainType[:], utils.Uint32ToBytes4(uint32(s.beaconCfg.CapellaForkVersion)), state.GenesisValidatorsRoot()) + } + if err != nil { + return err + } + signingRoot, err = computeSigningRoot(voluntaryExit, domain) + return err + }); err != nil { return err } signingRoot, err := computeSigningRoot(voluntaryExit, domain) @@ -136,8 +143,6 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 }, } - cn() - if msg.ImmediateVerification { return s.batchSignatureVerifier.ImmediateVerification(aggregateVerificationData) } diff --git a/cl/phase1/network/services/voluntary_exit_service_test.go b/cl/phase1/network/services/voluntary_exit_service_test.go index eb994d44b41..c4bd224980e 100644 --- a/cl/phase1/network/services/voluntary_exit_service_test.go +++ b/cl/phase1/network/services/voluntary_exit_service_test.go @@ -23,16 +23,14 @@ import ( "time" "github.com/erigontech/erigon-lib/types/ssz" - mockState "github.com/erigontech/erigon/cl/abstract/mock_services" + "github.com/erigontech/erigon/cl/antiquary/tests" "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" - mockSync "github.com/erigontech/erigon/cl/beacon/synced_data/mock_services" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/pool" "github.com/erigontech/erigon/cl/utils/eth_clock" - "github.com/pkg/errors" "github.com/stretchr/testify/suite" "go.uber.org/mock/gomock" ) @@ -42,7 +40,7 @@ type voluntaryExitTestSuite struct { gomockCtrl *gomock.Controller operationsPool *pool.OperationsPool emitters *beaconevents.EventEmitter - syncedData *mockSync.MockSyncedData + syncedData synced_data.SyncedData ethClock *eth_clock.MockEthereumClock beaconCfg *clparams.BeaconChainConfig voluntaryExitService VoluntaryExitService @@ -59,7 +57,9 @@ func (t *voluntaryExitTestSuite) SetupTest() { t.operationsPool = &pool.OperationsPool{ VoluntaryExitsPool: pool.NewOperationPool[uint64, *cltypes.SignedVoluntaryExit](10, "voluntaryExitsPool"), } - t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) + _, st, _ := tests.GetBellatrixRandom() + t.syncedData = synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true, 0) + t.syncedData.OnHeadState(st) t.ethClock = 
eth_clock.NewMockEthereumClock(t.gomockCtrl) t.beaconCfg = &clparams.BeaconChainConfig{} batchSignatureVerifier := NewBatchSignatureVerifier(context.TODO(), nil) @@ -91,6 +91,19 @@ func (t *voluntaryExitTestSuite) TestProcessMessage() { GossipData: nil, ImmediateVerification: true, } + mockMsg2 := &cltypes.SignedVoluntaryExitWithGossipData{ + SignedVoluntaryExit: &cltypes.SignedVoluntaryExit{ + VoluntaryExit: &cltypes.VoluntaryExit{ + Epoch: 1, + ValidatorIndex: 111111111, + }, + Signature: [96]byte{}, + }, + GossipData: nil, + ImmediateVerification: true, + } + + _, _, _ = mockMsg, mockMsg2, curEpoch tests := []struct { name string @@ -111,27 +124,25 @@ func (t *voluntaryExitTestSuite) TestProcessMessage() { { name: "state is nil", mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(nil, synced_data.EmptyCancel) + t.syncedData.UnsetHeadState() }, msg: mockMsg, wantErr: true, - err: ErrIgnore, + err: synced_data.ErrNotSynced, }, { name: "validator not found", mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(nil, errors.New("not found")).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState, synced_data.EmptyCancel).Times(1) + //t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(int(mockEpoch)) }, - msg: mockMsg, + msg: mockMsg2, wantErr: true, err: ErrIgnore, }, { name: "validator is not active", mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + _, st, _ := tests.GetBellatrixRandom() mockValidator := solid.NewValidatorFromParameters( [48]byte{}, [32]byte{}, @@ -142,8 +153,8 @@ func (t *voluntaryExitTestSuite) TestProcessMessage() { 0, 0, ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState, synced_data.EmptyCancel).Times(1) + st.ValidatorSet().Set(int(mockValidatorIndex), mockValidator) + t.syncedData.OnHeadState(st) t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) }, msg: mockMsg, @@ -152,19 +163,18 @@ func (t *voluntaryExitTestSuite) TestProcessMessage() { { name: "validator has been initialized", mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidatorFromParameters( - [48]byte{}, - [32]byte{}, - 0, - false, - 0, - 0, - curEpoch+1, - 0, - ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState, synced_data.EmptyCancel).Times(1) + // mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + // mockValidator := solid.NewValidatorFromParameters( + // [48]byte{}, + // [32]byte{}, + // 0, + // false, + // 0, + // 0, + // curEpoch+1, + // 0, + // ) + // mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) }, msg: mockMsg, @@ -173,7 +183,6 @@ func (t *voluntaryExitTestSuite) TestProcessMessage() { { name: "bls verify failed", mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) mockValidator := solid.NewValidatorFromParameters( [48]byte{}, [32]byte{}, @@ -184,12 +193,11 @@ func (t *voluntaryExitTestSuite) TestProcessMessage() { curEpoch+1, 0, ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) - 
t.syncedData.EXPECT().HeadStateReader().Return(mockState, synced_data.EmptyCancel).Times(1) + _, st, _ := tests.GetBellatrixRandom() + st.ValidatorSet().Set(int(mockValidatorIndex), mockValidator) + t.syncedData.OnHeadState(st) t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) t.beaconCfg.FarFutureEpoch = mockValidator.ExitEpoch() - mockState.EXPECT().Version().Return(clparams.AltairVersion).Times(1) - mockState.EXPECT().GetDomain(t.beaconCfg.DomainVoluntaryExit, mockMsg.SignedVoluntaryExit.VoluntaryExit.Epoch).Return([]byte{}, nil).Times(1) computeSigningRoot = func(_ ssz.HashableSSZ, domain []byte) ([32]byte, error) { return [32]byte{}, nil } @@ -201,7 +209,7 @@ func (t *voluntaryExitTestSuite) TestProcessMessage() { { name: "success", mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + _, st, _ := tests.GetBellatrixRandom() mockValidator := solid.NewValidatorFromParameters( [48]byte{}, [32]byte{}, @@ -212,12 +220,10 @@ func (t *voluntaryExitTestSuite) TestProcessMessage() { curEpoch+1, 0, ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState, synced_data.EmptyCancel).Times(1) + st.ValidatorSet().Set(int(mockValidatorIndex), mockValidator) + t.syncedData.OnHeadState(st) t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) t.beaconCfg.FarFutureEpoch = mockValidator.ExitEpoch() - mockState.EXPECT().Version().Return(clparams.AltairVersion).Times(1) - mockState.EXPECT().GetDomain(t.beaconCfg.DomainVoluntaryExit, mockMsg.SignedVoluntaryExit.VoluntaryExit.Epoch).Return([]byte{}, nil).Times(1) computeSigningRoot = func(_ ssz.HashableSSZ, domain []byte) ([32]byte, error) { return [32]byte{}, nil } diff --git a/cl/phase1/stages/forkchoice.go b/cl/phase1/stages/forkchoice.go index 14916c0d7f6..0e8f96f2082 100644 --- a/cl/phase1/stages/forkchoice.go +++ b/cl/phase1/stages/forkchoice.go @@ -2,6 +2,7 @@ package stages import ( "context" + "errors" "fmt" "os" "runtime" @@ -13,6 +14,7 @@ import ( "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cl/beacon/beaconevents" + "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/monitor" "github.com/erigontech/erigon/cl/monitor/shuffling_metrics" @@ -29,15 +31,25 @@ import ( // computeAndNotifyServicesOfNewForkChoice calculates the new head of the fork choice and notifies relevant services. // It updates the fork choice if possible and sets the status in the RPC. It returns the head slot, head root, and any error encountered. 
func computeAndNotifyServicesOfNewForkChoice(ctx context.Context, logger log.Logger, cfg *Cfg) (headSlot uint64, headRoot common.Hash, err error) { - prevHeadState, cn := cfg.syncedData.HeadState() - defer cn() - // Get the current head of the fork choice - headRoot, headSlot, err = cfg.forkChoice.GetHead(prevHeadState) - if err != nil { - err = fmt.Errorf("failed to get head: %w", err) - return + if err = cfg.syncedData.ViewHeadState(func(prevHeadState *state.CachingBeaconState) error { + // Get the current head of the fork choice + headRoot, headSlot, err = cfg.forkChoice.GetHead(prevHeadState) + if err != nil { + return fmt.Errorf("failed to get head: %w", err) + } + return nil + }); err != nil { + if errors.Is(err, synced_data.ErrNotSynced) { + // Get the current head of the fork choice + headRoot, headSlot, err = cfg.forkChoice.GetHead(nil) + if err != nil { + return 0, common.Hash{}, fmt.Errorf("failed to get head: %w", err) + } + } else { + return 0, common.Hash{}, fmt.Errorf("failed to get head: %w", err) + } + } - cn() // Observe the current slot and epoch in the monitor monitor.ObserveCurrentSlot(headSlot) monitor.ObserveCurrentEpoch(headSlot / cfg.beaconCfg.SlotsPerEpoch) @@ -311,34 +323,36 @@ func postForkchoiceOperations(ctx context.Context, tx kv.RwTx, logger log.Logger if err := cfg.syncedData.OnHeadState(headState); err != nil { return fmt.Errorf("failed to set head state: %w", err) } - headState, cn := cfg.syncedData.HeadState() // headState is a copy of the head state here. - defer cn() - // Produce and cache attestation data for validator node (this is not an expensive operation so we can do it for all nodes) - if _, err = cfg.attestationDataProducer.ProduceAndCacheAttestationData(tx, headState, headRoot, headState.Slot(), 0); err != nil { - logger.Warn("failed to produce and cache attestation data", "err", err) - } - // Run indexing routines for the database - if err := runIndexingRoutines(ctx, tx, cfg, headState); err != nil { - return fmt.Errorf("failed to run indexing routines: %w", err) - } + return cfg.syncedData.ViewHeadState(func(headState *state.CachingBeaconState) error { + // Produce and cache attestation data for validator node (this is not an expensive operation so we can do it for all nodes) + if _, err = cfg.attestationDataProducer.ProduceAndCacheAttestationData(tx, headState, headRoot, headState.Slot(), 0); err != nil { + logger.Warn("failed to produce and cache attestation data", "err", err) + } - // Dump the head state on disk for ease of chain reorgs - if err := cfg.forkChoice.DumpBeaconStateOnDisk(headState); err != nil { - return fmt.Errorf("failed to dump beacon state on disk: %w", err) - } + // Run indexing routines for the database + if err := runIndexingRoutines(ctx, tx, cfg, headState); err != nil { + return fmt.Errorf("failed to run indexing routines: %w", err) + } - // Save the head state on disk for eventual node restarts without checkpoint sync - if err := saveHeadStateOnDiskIfNeeded(cfg, headState); err != nil { - return fmt.Errorf("failed to save head state on disk: %w", err) - } - // Lastly, emit the head event - emitHeadEvent(cfg, headSlot, headRoot, headState) - emitNextPaylodAttributesEvent(cfg, headSlot, headRoot, headState) + // Dump the head state on disk for ease of chain reorgs + if err := cfg.forkChoice.DumpBeaconStateOnDisk(headState); err != nil { + return fmt.Errorf("failed to dump beacon state on disk: %w", err) + } + + // Save the head state on disk for eventual node restarts without checkpoint sync + if err := 
saveHeadStateOnDiskIfNeeded(cfg, headState); err != nil { + return fmt.Errorf("failed to save head state on disk: %w", err) + } + // Lastly, emit the head event + emitHeadEvent(cfg, headSlot, headRoot, headState) + emitNextPaylodAttributesEvent(cfg, headSlot, headRoot, headState) + + // Shuffle validator set for the next epoch + preCacheNextShuffledValidatorSet(ctx, logger, cfg, headState) + return nil + }) - // Shuffle validator set for the next epoch - preCacheNextShuffledValidatorSet(ctx, logger, cfg, headState) - return nil } // doForkchoiceRoutine performs the fork choice routine by computing the new fork choice, updating the canonical chain in the database, diff --git a/cl/spectest/consensus_tests/fork_choice.go b/cl/spectest/consensus_tests/fork_choice.go index 1872b32371b..e31950d3825 100644 --- a/cl/spectest/consensus_tests/fork_choice.go +++ b/cl/spectest/consensus_tests/fork_choice.go @@ -209,7 +209,7 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err forkStore, err := forkchoice.NewForkChoiceStore( ethClock, anchorState, nil, pool.NewOperationsPool(&clparams.MainnetBeaconConfig), fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs(), beacon_router_configuration.RouterConfiguration{}, emitters), - emitters, synced_data.NewSyncedDataManager(true, &clparams.MainnetBeaconConfig), blobStorage, validatorMonitor, false) + emitters, synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true, 0), blobStorage, validatorMonitor, false) require.NoError(t, err) forkStore.SetSynced(true) diff --git a/cl/transition/impl/eth2/statechange/finalization_and_justification.go b/cl/transition/impl/eth2/statechange/finalization_and_justification.go index 782807662c1..dca206396cd 100644 --- a/cl/transition/impl/eth2/statechange/finalization_and_justification.go +++ b/cl/transition/impl/eth2/statechange/finalization_and_justification.go @@ -142,7 +142,7 @@ func computePreviousAndCurrentTargetBalancePostAltair(s abstract.BeaconState, un shardSize = s.ValidatorSet().Length() } - wp := threading.CreateWorkerPool(numWorkers) + wp := threading.NewParallelExecutor() for i := 0; i < numWorkers; i++ { workerID := i from := workerID * shardSize @@ -182,7 +182,7 @@ func computePreviousAndCurrentTargetBalancePostAltair(s abstract.BeaconState, un } } - wp.WaitAndClose() + wp.Execute() for i := 0; i < numWorkers; i++ { previousTargetBalance += previousTargetBalanceShards[i] diff --git a/cl/transition/impl/eth2/statechange/process_rewards_and_penalties.go b/cl/transition/impl/eth2/statechange/process_rewards_and_penalties.go index 849f7c5984f..2f564a093f7 100644 --- a/cl/transition/impl/eth2/statechange/process_rewards_and_penalties.go +++ b/cl/transition/impl/eth2/statechange/process_rewards_and_penalties.go @@ -34,7 +34,7 @@ func getFlagsTotalBalances(s abstract.BeaconState, flagsUnslashedIndiciesSet [][ flagsTotalBalances := make([]uint64, len(weights)) numWorkers := runtime.NumCPU() - wp := threading.CreateWorkerPool(numWorkers) + wp := threading.NewParallelExecutor() flagsTotalBalancesShards := make([][]uint64, len(weights)) shardSize := s.ValidatorLength() / numWorkers @@ -70,7 +70,7 @@ func getFlagsTotalBalances(s abstract.BeaconState, flagsUnslashedIndiciesSet [][ } } - wp.WaitAndClose() + wp.Execute() for i := range weights { for j := 0; j < numWorkers; j++ { diff --git a/cl/utils/threading/parallel_executor.go b/cl/utils/threading/parallel_executor.go new file mode 100644 index 00000000000..21cd785c6ce --- /dev/null +++ 
b/cl/utils/threading/parallel_executor.go @@ -0,0 +1,74 @@ +package threading + +import ( + "fmt" + "sync" + "time" + + "github.com/erigontech/erigon-lib/common/dbg" +) + +type ParallelExecutor struct { + jobs []func() error + wg sync.WaitGroup +} + +// NewParallelExecutor creates an empty executor; queue jobs with AddWork, then run them with Execute. +func NewParallelExecutor() *ParallelExecutor { + return &ParallelExecutor{} } + +// Execute runs every queued job in its own goroutine, waits for all of them to finish, and returns the last error observed, if any. +func (wp *ParallelExecutor) Execute() error { + var errOut error + if dbg.CaplinSyncedDataMangerDeadlockDetection { + st := dbg.Stack() + ch := make(chan struct{}) + go func() { + select { + case <-ch: + case <-time.After(100 * time.Second): + fmt.Println("Deadlock detected - ParallelExecutor", st) + } + }() + defer close(ch) + } + for _, job := range wp.jobs { + wp.wg.Add(1) + go func(job func() error) { + defer wp.wg.Done() + if err := job(); err != nil { + errOut = err + } + }(job) + } + wp.wg.Wait() + return errOut +} + +// AddWork queues a job; nothing runs until Execute is called. +func (wp *ParallelExecutor) AddWork(f func() error) { + wp.jobs = append(wp.jobs, f) +} + +func ParallellForLoop(numWorkers int, from, to int, f func(int) error) error { + // divide the work into numWorkers parts + size := (to - from) / numWorkers + wp := ParallelExecutor{} + for i := 0; i < numWorkers; i++ { + start := from + i*size + end := start + size + if i == numWorkers-1 { + end = to + } + wp.AddWork(func() error { + for j := start; j < end; j++ { + if err := f(j); err != nil { + return err + } + } + return nil + }) + } + return wp.Execute() +} diff --git a/cl/utils/threading/worker_pool.go b/cl/utils/threading/worker_pool.go deleted file mode 100644 index 3e20c2f65be..00000000000 --- a/cl/utils/threading/worker_pool.go +++ /dev/null @@ -1,79 +0,0 @@ -package threading - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -type WorkerPool struct { - work chan func() error - wg sync.WaitGroup - atomicErr unsafe.Pointer -} - -// CreateWorkerPool initializes a pool of workers to process tasks. -func CreateWorkerPool(numWorkers int) *WorkerPool { - wp := WorkerPool{ - work: make(chan func() error, 1000), - } - for i := 1; i <= numWorkers; i++ { - go wp.StartWorker() - } - return &wp -} - -// close work channel and finish -func (wp *WorkerPool) WaitAndClose() { - // Wait for all workers to finish. - wp.wg.Wait() - // Close the task channel to indicate no more tasks will be sent. - close(wp.work) -} - -// Worker is the worker that processes tasks.
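For context, the replacement API queues jobs up front and only runs them when Execute is called, unlike the old channel-fed WorkerPool being deleted here. A minimal usage sketch, assuming the package is imported from cl/utils/threading; the worker count and the toy summation are illustrative only:

    package main

    import (
    	"fmt"
    	"sync/atomic"

    	"github.com/erigontech/erigon/cl/utils/threading"
    )

    func main() {
    	// Sum i*i for i in [0, 1000) across 8 shards with the ParallellForLoop helper.
    	var total atomic.Uint64
    	if err := threading.ParallellForLoop(8, 0, 1000, func(i int) error {
    		total.Add(uint64(i * i))
    		return nil
    	}); err != nil {
    		panic(err)
    	}
    	fmt.Println("sum of squares:", total.Load())

    	// Or drive the executor directly: queue work first, then run everything at once.
    	wp := threading.NewParallelExecutor()
    	for shard := 0; shard < 4; shard++ {
    		shard := shard // capture the loop variable for the closure
    		wp.AddWork(func() error {
    			fmt.Println("processing shard", shard)
    			return nil
    		})
    	}
    	if err := wp.Execute(); err != nil {
    		panic(err)
    	}
    }

Note that Execute keeps only the last error returned by a failing job, so callers that need per-task error reporting should record errors inside the jobs themselves.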
-func (wp *WorkerPool) StartWorker() { - for task := range wp.work { - if err := task(); err != nil { - atomic.StorePointer(&wp.atomicErr, unsafe.Pointer(&err)) - } - wp.wg.Done() - } -} - -func (wp *WorkerPool) Error() error { - errPointer := atomic.LoadPointer(&wp.atomicErr) - if errPointer == nil { - return nil - } - return *(*error)(errPointer) -} - -// enqueue work -func (wp *WorkerPool) AddWork(f func() error) { - wp.wg.Add(1) - wp.work <- f -} - -func ParallellForLoop(numWorkers int, from, to int, f func(int) error) error { - // divide the work into numWorkers parts - size := (to - from) / numWorkers - wp := CreateWorkerPool(numWorkers) - for i := 0; i < numWorkers; i++ { - start := from + i*size - end := start + size - if i == numWorkers-1 { - end = to - } - wp.AddWork(func() error { - for j := start; j < end; j++ { - if err := f(j); err != nil { - return err - } - } - return nil - }) - } - wp.WaitAndClose() - return wp.Error() -} diff --git a/cl/validator/committee_subscription/committee_subscription.go b/cl/validator/committee_subscription/committee_subscription.go index ca845430664..ca8c9dffadd 100644 --- a/cl/validator/committee_subscription/committee_subscription.go +++ b/cl/validator/committee_subscription/committee_subscription.go @@ -99,15 +99,12 @@ func (c *CommitteeSubscribeMgmt) AddAttestationSubscription(ctx context.Context, cIndex = p.CommitteeIndex ) - headState, cn := c.syncedData.HeadState() - defer cn() - if headState == nil { + if c.syncedData.Syncing() { return errors.New("head state not available") } log.Trace("Add attestation subscription", "slot", slot, "committeeIndex", cIndex, "isAggregator", p.IsAggregator, "validatorIndex", p.ValidatorIndex) - commiteePerSlot := headState.CommitteeCount(p.Slot / c.beaconConfig.SlotsPerEpoch) - cn() + commiteePerSlot := c.syncedData.CommitteeCount(p.Slot / c.beaconConfig.SlotsPerEpoch) subnetId := subnets.ComputeSubnetForAttestation(commiteePerSlot, slot, cIndex, c.beaconConfig.SlotsPerEpoch, c.netConfig.AttestationSubnetCount) // add validator to subscription c.validatorSubsMutex.Lock() diff --git a/cmd/caplin/caplin1/run.go b/cmd/caplin/caplin1/run.go index 0d059a76f4b..21a122af25e 100644 --- a/cmd/caplin/caplin1/run.go +++ b/cmd/caplin/caplin1/run.go @@ -253,7 +253,7 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi return err } fcuFs := afero.NewBasePathFs(afero.NewOsFs(), caplinFcuPath) - syncedDataManager := synced_data.NewSyncedDataManager(true, beaconConfig) + syncedDataManager := synced_data.NewSyncedDataManager(beaconConfig, true, synced_data.MinHeadStateDelay) syncContributionPool := sync_contribution_pool.NewSyncContributionPool(beaconConfig) emitters := beaconevents.NewEventEmitter() diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go index b934bf815ec..e5e2a8aae73 100644 --- a/erigon-lib/common/dbg/experiments.go +++ b/erigon-lib/common/dbg/experiments.go @@ -67,6 +67,8 @@ var ( OnlyCreateDB = EnvBool("ONLY_CREATE_DB", false) CommitEachStage = EnvBool("COMMIT_EACH_STAGE", false) + + CaplinSyncedDataMangerDeadlockDetection = EnvBool("CAPLIN_SYNCED_DATA_MANAGER_DEADLOCK_DETECTION", true) ) func ReadMemStats(m *runtime.MemStats) { From 76f4107c093dacede8222b01e936e1ee4eba9774 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 7 Nov 2024 11:47:26 +0100 Subject: [PATCH 28/28] Caplin: Automatic retirement of state tables to their own snapshot files (#12508) --- cl/antiquary/antiquary.go | 5 +- cl/antiquary/state_antiquary.go | 134 +++- 
cl/antiquary/state_antiquary_test.go | 2 +- cl/beacon/handler/attestation_rewards.go | 14 +- cl/beacon/handler/committees.go | 9 +- cl/beacon/handler/duties_attester.go | 11 +- cl/beacon/handler/duties_sync.go | 6 +- cl/beacon/handler/handler.go | 52 +- cl/beacon/handler/lighthouse.go | 24 +- cl/beacon/handler/liveness.go | 7 +- cl/beacon/handler/rewards.go | 14 +- cl/beacon/handler/states.go | 22 +- cl/beacon/handler/states_test.go | 29 +- cl/beacon/handler/utils_test.go | 5 +- cl/beacon/handler/validator_test.go | 1 + cl/beacon/handler/validators.go | 25 +- .../attesting_indicies.go | 31 +- .../historical_states_reader.go | 280 ++++--- .../historical_states_reader_test.go | 4 +- cl/persistence/state/state_accessors.go | 41 +- cl/persistence/state/validator_events.go | 4 + cl/sentinel/sentinel_requests_test.go | 2 +- cmd/capcli/cli.go | 85 ++- cmd/caplin/caplin1/run.go | 11 +- erigon-lib/common/datadir/dirs.go | 4 +- erigon-lib/downloader/downloader.go | 11 +- erigon-lib/downloader/snaptype/files.go | 3 + erigon-lib/downloader/snaptype/type.go | 61 ++ erigon-lib/downloader/util.go | 7 +- turbo/snapshotsync/caplin_state_snapshots.go | 712 ++++++++++++++++++ .../freezeblocks/caplin_snapshots.go | 35 +- turbo/snapshotsync/snapshots.go | 25 + 32 files changed, 1407 insertions(+), 269 deletions(-) create mode 100644 turbo/snapshotsync/caplin_state_snapshots.go diff --git a/cl/antiquary/antiquary.go b/cl/antiquary/antiquary.go index 7f49882bfcd..79d736b9708 100644 --- a/cl/antiquary/antiquary.go +++ b/cl/antiquary/antiquary.go @@ -37,6 +37,7 @@ import ( "github.com/erigontech/erigon/cl/persistence/blob_storage" state_accessors "github.com/erigontech/erigon/cl/persistence/state" "github.com/erigontech/erigon/cl/phase1/core/state" + "github.com/erigontech/erigon/turbo/snapshotsync" "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" ) @@ -50,6 +51,7 @@ type Antiquary struct { downloader proto_downloader.DownloaderClient logger log.Logger sn *freezeblocks.CaplinSnapshots + stateSn *snapshotsync.CaplinStateSnapshots snReader freezeblocks.BeaconSnapshotReader snBuildSema *semaphore.Weighted // semaphore for building only one type (blocks, caplin, v3) at a time ctx context.Context @@ -65,7 +67,7 @@ type Antiquary struct { balances32 []byte } -func NewAntiquary(ctx context.Context, blobStorage blob_storage.BlobStorage, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, logger log.Logger, states, blocks, blobs, snapgen bool, snBuildSema *semaphore.Weighted) *Antiquary { +func NewAntiquary(ctx context.Context, blobStorage blob_storage.BlobStorage, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, stateSn *snapshotsync.CaplinStateSnapshots, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, logger log.Logger, states, blocks, blobs, snapgen bool, snBuildSema *semaphore.Weighted) *Antiquary { backfilled := &atomic.Bool{} blobBackfilled := &atomic.Bool{} backfilled.Store(false) @@ -89,6 +91,7 @@ func NewAntiquary(ctx context.Context, blobStorage blob_storage.BlobStorage, gen blocks: blocks, blobs: blobs, snapgen: snapgen, + stateSn: stateSn, } } diff --git 
a/cl/antiquary/state_antiquary.go b/cl/antiquary/state_antiquary.go index efb5238954b..b8f44621fde 100644 --- a/cl/antiquary/state_antiquary.go +++ b/cl/antiquary/state_antiquary.go @@ -27,7 +27,9 @@ import ( "github.com/erigontech/erigon-lib/common" libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/downloader/snaptype" "github.com/erigontech/erigon-lib/etl" + proto_downloader "github.com/erigontech/erigon-lib/gointerfaces/downloaderproto" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cl/clparams" @@ -42,6 +44,7 @@ import ( "github.com/erigontech/erigon/cl/phase1/core/state/raw" "github.com/erigontech/erigon/cl/transition" "github.com/erigontech/erigon/cl/transition/impl/eth2" + "github.com/erigontech/erigon/turbo/snapshotsync" ) // pool for buffers @@ -111,6 +114,9 @@ func (s *Antiquary) readHistoricalProcessingProgress(ctx context.Context) (progr if err != nil { return } + if s.stateSn != nil { + progress = max(progress, s.stateSn.BlocksAvailable()) + } finalized, err = beacon_indicies.ReadHighestFinalized(tx) if err != nil { @@ -119,8 +125,68 @@ func (s *Antiquary) readHistoricalProcessingProgress(ctx context.Context) (progr return } +func FillStaticValidatorsTableIfNeeded(ctx context.Context, logger log.Logger, stateSn *snapshotsync.CaplinStateSnapshots, validatorsTable *state_accessors.StaticValidatorTable) (bool, error) { + if stateSn == nil || validatorsTable.Slot() != 0 { + return false, nil + } + if err := stateSn.OpenFolder(); err != nil { + return false, err + } + blocksAvaiable := stateSn.BlocksAvailable() + stateSnRoTx := stateSn.View() + defer stateSnRoTx.Close() + + start := time.Now() + for slot := uint64(0); slot <= stateSn.BlocksAvailable(); slot++ { + seg, ok := stateSnRoTx.VisibleSegment(slot, kv.StateEvents) + if !ok { + return false, fmt.Errorf("segment not found for slot %d", slot) + } + buf, err := seg.Get(slot) + if err != nil { + return false, err + } + if len(buf) == 0 { + continue + } + event := state_accessors.NewStateEventsFromBytes(buf) + state_accessors.ReplayEvents( + func(validatorIndex uint64, validator solid.Validator) error { + return validatorsTable.AddValidator(validator, validatorIndex, slot) + }, + func(validatorIndex uint64, exitEpoch uint64) error { + return validatorsTable.AddExitEpoch(validatorIndex, slot, exitEpoch) + }, + func(validatorIndex uint64, withdrawableEpoch uint64) error { + return validatorsTable.AddWithdrawableEpoch(validatorIndex, slot, withdrawableEpoch) + }, + func(validatorIndex uint64, withdrawalCredentials libcommon.Hash) error { + return validatorsTable.AddWithdrawalCredentials(validatorIndex, slot, withdrawalCredentials) + }, + func(validatorIndex uint64, activationEpoch uint64) error { + return validatorsTable.AddActivationEpoch(validatorIndex, slot, activationEpoch) + }, + func(validatorIndex uint64, activationEligibilityEpoch uint64) error { + return validatorsTable.AddActivationEligibility(validatorIndex, slot, activationEligibilityEpoch) + }, + func(validatorIndex uint64, slashed bool) error { + return validatorsTable.AddSlashed(validatorIndex, slot, slashed) + }, + event, + ) + validatorsTable.SetSlot(slot) + } + logger.Info("[Antiquary] Filled static validators table", "slots", blocksAvaiable, "elapsed", time.Since(start)) + return true, nil +} + func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { - var tx kv.Tx + + // Check if you need to fill the static validators table + 
refilledStaticValidators, err := FillStaticValidatorsTableIfNeeded(ctx, s.logger, s.stateSn, s.validatorsTable) + if err != nil { + return err + } tx, err := s.mainDB.BeginRo(ctx) if err != nil { @@ -131,6 +197,13 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { // maps which validators changes var changedValidators sync.Map + if refilledStaticValidators { + s.validatorsTable.ForEach(func(validatorIndex uint64, validator *state_accessors.StaticValidator) bool { + changedValidators.Store(validatorIndex, struct{}{}) + return true + }) + } + stateAntiquaryCollector := newBeaconStatesCollector(s.cfg, s.dirs.Tmp, s.logger) defer stateAntiquaryCollector.close() @@ -413,6 +486,59 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return err } log.Info("Historical states antiquated", "slot", s.currentState.Slot(), "root", libcommon.Hash(stateRoot), "latency", endTime) + if s.snapgen { + if err := s.stateSn.OpenFolder(); err != nil { + return err + } + + // Keep gnosis out for a bit + if s.currentState.BeaconConfig().ConfigName == "gnosis" { + return nil + } + blocksPerStatefulFile := uint64(snaptype.CaplinMergeLimit * 5) + from := s.stateSn.BlocksAvailable() + 1 + if from+blocksPerStatefulFile+safetyMargin > s.currentState.Slot() { + return nil + } + to := s.currentState.Slot() + if to < (safetyMargin + blocksPerStatefulFile) { + return nil + } + to = to - (safetyMargin + blocksPerStatefulFile) + if from >= to { + return nil + } + if err := s.stateSn.DumpCaplinState( + ctx, + s.stateSn.BlocksAvailable()+1, + to, + blocksPerStatefulFile, + s.sn.Salt, + s.dirs, + 1, + log.LvlInfo, + s.logger, + ); err != nil { + return err + } + paths := s.stateSn.SegFileNames(from, to) + downloadItems := make([]*proto_downloader.AddItem, len(paths)) + for i, path := range paths { + downloadItems[i] = &proto_downloader.AddItem{ + Path: path, + } + } + if s.downloader != nil { + // Notify bittorent to seed the new snapshots + if _, err := s.downloader.Add(s.ctx, &proto_downloader.AddRequest{Items: downloadItems}); err != nil { + s.logger.Warn("[Antiquary] Failed to add items to bittorent", "err", err) + } + } + if err := s.stateSn.OpenFolder(); err != nil { + return err + } + } + return nil } @@ -439,12 +565,15 @@ func (s *Antiquary) initializeStateAntiquaryIfNeeded(ctx context.Context, tx kv. if err != nil { return err } + if s.stateSn != nil { + targetSlot = max(targetSlot, s.stateSn.BlocksAvailable()) + } // We want to backoff by some slots until we get a correct state from DB. // we start from 10 * clparams.SlotsPerDump. backoffStrides := uint64(10) backoffStep := backoffStrides - historicalReader := historical_states_reader.NewHistoricalStatesReader(s.cfg, s.snReader, s.validatorsTable, s.genesisState) + historicalReader := historical_states_reader.NewHistoricalStatesReader(s.cfg, s.snReader, s.validatorsTable, s.genesisState, s.stateSn) for { attempt, err := computeSlotToBeRequested(tx, s.cfg, s.genesisState.Slot(), targetSlot, backoffStep) @@ -465,6 +594,7 @@ func (s *Antiquary) initializeStateAntiquaryIfNeeded(ctx context.Context, tx kv. 
if err != nil { return fmt.Errorf("failed to read historical state at slot %d: %w", attempt, err) } + if s.currentState == nil { log.Warn("historical state not found, backoff more and try again", "slot", attempt) backoffStep += backoffStrides diff --git a/cl/antiquary/state_antiquary_test.go b/cl/antiquary/state_antiquary_test.go index 08e37c4fc6c..12f8cf8d792 100644 --- a/cl/antiquary/state_antiquary_test.go +++ b/cl/antiquary/state_antiquary_test.go @@ -41,7 +41,7 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() - a := NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, log.New(), true, true, true, false, nil) + a := NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, nil, reader, log.New(), true, true, true, false, nil) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) } diff --git a/cl/beacon/handler/attestation_rewards.go b/cl/beacon/handler/attestation_rewards.go index a315df28bf5..2c175c9f4c1 100644 --- a/cl/beacon/handler/attestation_rewards.go +++ b/cl/beacon/handler/attestation_rewards.go @@ -178,13 +178,17 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r if lastSlot > stateProgress { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("requested range is not yet processed or the node is not archivial")) } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() - epochData, err := state_accessors.ReadEpochData(tx, a.beaconChainCfg.RoundSlotToEpoch(lastSlot)) + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + + epochData, err := state_accessors.ReadEpochData(stateGetter, a.beaconChainCfg.RoundSlotToEpoch(lastSlot)) if err != nil { return nil, err } - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, lastSlot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, stateGetter, lastSlot) if err != nil { return nil, err } @@ -192,12 +196,12 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no validator set found for this epoch")) } - _, previousIdx, err := a.stateReader.ReadParticipations(tx, lastSlot) + _, previousIdx, err := a.stateReader.ReadParticipations(tx, stateGetter, lastSlot) if err != nil { return nil, err } - _, _, finalizedCheckpoint, ok, err := state_accessors.ReadCheckpoints(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + _, _, finalizedCheckpoint, ok, err := state_accessors.ReadCheckpoints(stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } @@ -212,7 +216,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r return resp.WithFinalized(true).WithOptimistic(a.forkchoiceStore.IsRootOptimistic(root)), nil } inactivityScores := solid.NewUint64ListSSZ(int(a.beaconChainCfg.ValidatorRegistryLimit)) - if err := a.stateReader.ReconstructUint64ListDump(tx, lastSlot, kv.InactivityScores, validatorSet.Length(), inactivityScores); err != nil { + if err := a.stateReader.ReconstructUint64ListDump(stateGetter, lastSlot, kv.InactivityScores, validatorSet.Length(), inactivityScores); err != nil { return nil, err } resp, err := a.computeAttestationsRewardsForAltair( diff --git a/cl/beacon/handler/committees.go b/cl/beacon/handler/committees.go index 
40e2dccf5c1..2a2b324b546 100644 --- a/cl/beacon/handler/committees.go +++ b/cl/beacon/handler/committees.go @@ -123,8 +123,13 @@ func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*bea return newBeaconResponse(resp).WithFinalized(isFinalized).WithOptimistic(isOptimistic), nil } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) // finality case - activeIdxs, err := state_accessors.ReadActiveIndicies(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + activeIdxs, err := state_accessors.ReadActiveIndicies( + stateGetter, + epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } @@ -138,7 +143,7 @@ func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*bea } mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % a.beaconChainCfg.EpochsPerHistoricalVector - mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read randao mix: %v", err)) } diff --git a/cl/beacon/handler/duties_attester.go b/cl/beacon/handler/duties_attester.go index 5c8ff7b1e75..31040bdbab2 100644 --- a/cl/beacon/handler/duties_attester.go +++ b/cl/beacon/handler/duties_attester.go @@ -155,8 +155,15 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) ( if (epoch)*a.beaconChainCfg.SlotsPerEpoch >= stageStateProgress { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch)) } + + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) // finality case - activeIdxs, err := state_accessors.ReadActiveIndicies(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + activeIdxs, err := state_accessors.ReadActiveIndicies( + stateGetter, + epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } @@ -170,7 +177,7 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) ( } mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % a.beaconChainCfg.EpochsPerHistoricalVector - mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read randao mix: %v", err)) } diff --git a/cl/beacon/handler/duties_sync.go b/cl/beacon/handler/duties_sync.go index 024fd6d45e5..bc4e7cc082a 100644 --- a/cl/beacon/handler/duties_sync.go +++ b/cl/beacon/handler/duties_sync.go @@ -81,9 +81,13 @@ func (a *ApiHandler) getSyncDuties(w http.ResponseWriter, r *http.Request) (*bea if !ok { _, syncCommittee, ok = a.forkchoiceStore.GetSyncCommittees(period - 1) } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() // Read them from the archive node if we do not have them in the fast-access storage if !ok { - syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(startSlotAtEpoch)) + syncCommittee, err = 
state_accessors.ReadCurrentSyncCommittee( + state_accessors.GetValFnTxAndSnapshot(tx, snRoTx), + a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(startSlotAtEpoch)) if syncCommittee == nil { log.Warn("could not find sync committee for epoch", "epoch", epoch, "period", period) return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not find sync committee for epoch %d", epoch)) diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go index 5d74b12226a..76ce92e4b43 100644 --- a/cl/beacon/handler/handler.go +++ b/cl/beacon/handler/handler.go @@ -49,6 +49,7 @@ import ( "github.com/erigontech/erigon/cl/validator/committee_subscription" "github.com/erigontech/erigon/cl/validator/sync_contribution_pool" "github.com/erigontech/erigon/cl/validator/validator_params" + "github.com/erigontech/erigon/turbo/snapshotsync" "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" ) @@ -64,18 +65,19 @@ type ApiHandler struct { o sync.Once mux *chi.Mux - blockReader freezeblocks.BeaconSnapshotReader - indiciesDB kv.RwDB - netConfig *clparams.NetworkConfig - ethClock eth_clock.EthereumClock - beaconChainCfg *clparams.BeaconChainConfig - forkchoiceStore forkchoice.ForkChoiceStorage - operationsPool pool.OperationsPool - syncedData synced_data.SyncedData - stateReader *historical_states_reader.HistoricalStatesReader - sentinel sentinel.SentinelClient - blobStoage blob_storage.BlobStorage - caplinSnapshots *freezeblocks.CaplinSnapshots + blockReader freezeblocks.BeaconSnapshotReader + indiciesDB kv.RwDB + netConfig *clparams.NetworkConfig + ethClock eth_clock.EthereumClock + beaconChainCfg *clparams.BeaconChainConfig + forkchoiceStore forkchoice.ForkChoiceStorage + operationsPool pool.OperationsPool + syncedData synced_data.SyncedData + stateReader *historical_states_reader.HistoricalStatesReader + sentinel sentinel.SentinelClient + blobStoage blob_storage.BlobStorage + caplinSnapshots *freezeblocks.CaplinSnapshots + caplinStateSnapshots *snapshotsync.CaplinStateSnapshots version string // Node's version @@ -143,6 +145,7 @@ func NewApiHandler( proposerSlashingService services.ProposerSlashingService, builderClient builder.BuilderClient, validatorMonitor monitor.ValidatorMonitor, + caplinStateSnapshots *snapshotsync.CaplinStateSnapshots, enableMemoizedHeadState bool, ) *ApiHandler { blobBundles, err := lru.New[common.Bytes48, BlobBundle]("blobs", maxBlobBundleCacheSize) @@ -150,18 +153,19 @@ func NewApiHandler( panic(err) } return &ApiHandler{ - logger: logger, - validatorParams: validatorParams, - o: sync.Once{}, - netConfig: netConfig, - ethClock: ethClock, - beaconChainCfg: beaconChainConfig, - indiciesDB: indiciesDB, - forkchoiceStore: forkchoiceStore, - operationsPool: operationsPool, - blockReader: rcsn, - syncedData: syncedData, - stateReader: stateReader, + logger: logger, + validatorParams: validatorParams, + o: sync.Once{}, + netConfig: netConfig, + ethClock: ethClock, + beaconChainCfg: beaconChainConfig, + indiciesDB: indiciesDB, + forkchoiceStore: forkchoiceStore, + operationsPool: operationsPool, + blockReader: rcsn, + syncedData: syncedData, + stateReader: stateReader, + caplinStateSnapshots: caplinStateSnapshots, randaoMixesPool: sync.Pool{New: func() interface{} { return solid.NewHashVector(int(beaconChainConfig.EpochsPerHistoricalVector)) }}, diff --git a/cl/beacon/handler/lighthouse.go b/cl/beacon/handler/lighthouse.go index 612cb31e480..f2e978f5e56 100644 --- a/cl/beacon/handler/lighthouse.go +++ b/cl/beacon/handler/lighthouse.go @@ -76,6 +76,10 @@ 
func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter } defer tx.Rollback() + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + slot := epoch * a.beaconChainCfg.SlotsPerEpoch if slot >= a.forkchoiceStore.LowestAvailableSlot() { // Take data from forkchoice @@ -120,29 +124,30 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter } // read the epoch datas first - epochData, err := state_accessors.ReadEpochData(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + epochData, err := state_accessors.ReadEpochData(stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if epochData == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for current epoch")) } - prevEpochData, err := state_accessors.ReadEpochData(tx, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) + prevEpochData, err := state_accessors.ReadEpochData(stateGetter, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if prevEpochData == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for previous epoch")) } + // read the validator set - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, slot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, stateGetter, slot) if err != nil { return nil, err } if validatorSet == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } - currentEpochParticipation, previousEpochParticipation, err := a.stateReader.ReadParticipations(tx, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) + currentEpochParticipation, previousEpochParticipation, err := a.stateReader.ReadParticipations(tx, stateGetter, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) if err != nil { return nil, err } @@ -277,15 +282,18 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h return newBeaconResponse(a.computeLighthouseValidatorInclusion(int(validatorIndex), prevEpoch, epoch, activeBalance, prevActiveBalance, validatorSet, currentEpochParticipation, previousEpochParticipation)), nil } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) // read the epoch datas first - epochData, err := state_accessors.ReadEpochData(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + epochData, err := state_accessors.ReadEpochData(stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if epochData == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for current epoch")) } - prevEpochData, err := state_accessors.ReadEpochData(tx, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) + prevEpochData, err := state_accessors.ReadEpochData(stateGetter, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } @@ -293,14 +301,14 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for previous epoch")) } // read the validator set - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, slot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, stateGetter, slot) if err != nil { return nil, err } if validatorSet == nil { return nil, 
beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } - currentEpochParticipation, previousEpochParticipation, err := a.stateReader.ReadParticipations(tx, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) + currentEpochParticipation, previousEpochParticipation, err := a.stateReader.ReadParticipations(tx, stateGetter, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) if err != nil { return nil, err } diff --git a/cl/beacon/handler/liveness.go b/cl/beacon/handler/liveness.go index ccce105a571..f81d2e74621 100644 --- a/cl/beacon/handler/liveness.go +++ b/cl/beacon/handler/liveness.go @@ -28,6 +28,7 @@ import ( "github.com/erigontech/erigon/cl/beacon/beaconhttp" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" + state_accessors "github.com/erigontech/erigon/cl/persistence/state" ) type live struct { @@ -138,11 +139,15 @@ func (a *ApiHandler) obtainCurrentEpochParticipationFromEpoch(tx kv.Tx, epoch ui if epoch > 0 { prevEpoch-- } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) currParticipation, ok1 := a.forkchoiceStore.Participation(epoch) prevParticipation, ok2 := a.forkchoiceStore.Participation(prevEpoch) if !ok1 || !ok2 { - return a.stateReader.ReadParticipations(tx, blockSlot) + return a.stateReader.ReadParticipations(tx, stateGetter, blockSlot) } return currParticipation, prevParticipation, nil diff --git a/cl/beacon/handler/rewards.go b/cl/beacon/handler/rewards.go index bec4923de39..6a302207020 100644 --- a/cl/beacon/handler/rewards.go +++ b/cl/beacon/handler/rewards.go @@ -81,7 +81,11 @@ func (a *ApiHandler) GetEthV1BeaconRewardsBlocks(w http.ResponseWriter, r *http. Total: blkRewards.Attestations + blkRewards.ProposerSlashings + blkRewards.AttesterSlashings + blkRewards.SyncAggregate, }).WithFinalized(isFinalized).WithOptimistic(isOptimistic), nil } - slotData, err := state_accessors.ReadSlotData(tx, slot) + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + slotData, err := state_accessors.ReadSlotData(stateGetter, slot) if err != nil { return nil, err } @@ -165,11 +169,15 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, syncCommittee *solid.SyncCommittee totalActiveBalance uint64 ) + + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + getter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) if slot < a.forkchoiceStore.LowestAvailableSlot() { if !isCanonical { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("non-canonical finalized block not found")) } - epochData, err := state_accessors.ReadEpochData(tx, a.beaconChainCfg.RoundSlotToEpoch(blk.Block.Slot)) + epochData, err := state_accessors.ReadEpochData(getter, a.beaconChainCfg.RoundSlotToEpoch(blk.Block.Slot)) if err != nil { return nil, err } @@ -177,7 +185,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("could not read historical sync committee rewards, node may not be archive or it still processing historical states")) } totalActiveBalance = epochData.TotalActiveBalance - syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(blk.Block.Slot)) + syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(getter, 
a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(blk.Block.Slot)) if err != nil { return nil, err } diff --git a/cl/beacon/handler/states.go b/cl/beacon/handler/states.go index 81e31957125..e781433c74a 100644 --- a/cl/beacon/handler/states.go +++ b/cl/beacon/handler/states.go @@ -259,8 +259,13 @@ func (a *ApiHandler) getFinalityCheckpoints(w http.ResponseWriter, r *http.Reque if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err) } + + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) if !ok { - currentJustifiedCheckpoint, previousJustifiedCheckpoint, finalizedCheckpoint, ok, err = state_accessors.ReadCheckpoints(tx, a.beaconChainCfg.RoundSlotToEpoch(*slot)) + currentJustifiedCheckpoint, previousJustifiedCheckpoint, finalizedCheckpoint, ok, err = state_accessors.ReadCheckpoints(stateGetter, a.beaconChainCfg.RoundSlotToEpoch(*slot)) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err) } @@ -314,16 +319,21 @@ func (a *ApiHandler) getSyncCommittees(w http.ResponseWriter, r *http.Request) ( return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read block slot: %x", blockRoot)) } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + // Code here currentSyncCommittee, nextSyncCommittee, ok := a.forkchoiceStore.GetSyncCommittees(a.beaconChainCfg.SyncCommitteePeriod(*slot)) if !ok { syncCommitteeSlot := a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(*slot) // Check the main database if it cannot be found in the forkchoice store - currentSyncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, syncCommitteeSlot) + currentSyncCommittee, err = state_accessors.ReadCurrentSyncCommittee(stateGetter, syncCommitteeSlot) if err != nil { return nil, err } - nextSyncCommittee, err = state_accessors.ReadNextSyncCommittee(tx, syncCommitteeSlot) + nextSyncCommittee, err = state_accessors.ReadNextSyncCommittee(stateGetter, syncCommitteeSlot) if err != nil { return nil, err } @@ -438,7 +448,11 @@ func (a *ApiHandler) getRandao(w http.ResponseWriter, r *http.Request) (*beaconh if canonicalRoot != blockRoot { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read randao: %x", blockRoot)) } - mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, slot, epoch%a.beaconChainCfg.EpochsPerHistoricalVector) + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, stateGetter, slot, epoch%a.beaconChainCfg.EpochsPerHistoricalVector) if err != nil { return nil, err } diff --git a/cl/beacon/handler/states_test.go b/cl/beacon/handler/states_test.go index c822ed6774f..48035a6e599 100644 --- a/cl/beacon/handler/states_test.go +++ b/cl/beacon/handler/states_test.go @@ -18,6 +18,7 @@ package handler import ( "encoding/json" + "fmt" "io" "net/http" "net/http/httptest" @@ -159,6 +160,7 @@ func TestGetStateFullHistorical(t *testing.T) { // setupTestingHandler(t, clparams.Phase0Version) _, blocks, _, _, postState, handler, _, _, fcu, _ := setupTestingHandler(t, clparams.Phase0Version, log.Root(), true) + fmt.Println("AX") postRoot, err := postState.HashSSZ() require.NoError(t, err) @@ -213,7 +215,32 @@ func TestGetStateFullHistorical(t *testing.T) { require.NoError(t, err) other := 
state.New(&clparams.MainnetBeaconConfig) require.NoError(t, other.DecodeSSZ(out, int(clparams.Phase0Version))) - + for i := 0; i < other.ValidatorLength(); i++ { + if other.ValidatorSet().Get(i).PublicKey() != postState.ValidatorSet().Get(i).PublicKey() { + fmt.Println("difference in validator", i, other.ValidatorSet().Get(i).PublicKey(), postState.ValidatorSet().Get(i).PublicKey()) + } + if other.ValidatorSet().Get(i).WithdrawalCredentials() != postState.ValidatorSet().Get(i).WithdrawalCredentials() { + fmt.Println("difference in withdrawal", i, other.ValidatorSet().Get(i).WithdrawalCredentials(), postState.ValidatorSet().Get(i).WithdrawalCredentials()) + } + if other.ValidatorSet().Get(i).EffectiveBalance() != postState.ValidatorSet().Get(i).EffectiveBalance() { + fmt.Println("difference in effective", i, other.ValidatorSet().Get(i).EffectiveBalance(), postState.ValidatorSet().Get(i).EffectiveBalance()) + } + if other.ValidatorSet().Get(i).Slashed() != postState.ValidatorSet().Get(i).Slashed() { + fmt.Println("difference in slashed", i, other.ValidatorSet().Get(i).Slashed(), postState.ValidatorSet().Get(i).Slashed()) + } + if other.ValidatorSet().Get(i).ActivationEligibilityEpoch() != postState.ValidatorSet().Get(i).ActivationEligibilityEpoch() { + fmt.Println("difference in activation", i, other.ValidatorSet().Get(i).ActivationEligibilityEpoch(), postState.ValidatorSet().Get(i).ActivationEligibilityEpoch()) + } + if other.ValidatorSet().Get(i).ActivationEpoch() != postState.ValidatorSet().Get(i).ActivationEpoch() { + fmt.Println("difference in activation", i, other.ValidatorSet().Get(i).ActivationEpoch(), postState.ValidatorSet().Get(i).ActivationEpoch()) + } + if other.ValidatorSet().Get(i).ExitEpoch() != postState.ValidatorSet().Get(i).ExitEpoch() { + fmt.Println("difference in exit", i, other.ValidatorSet().Get(i).ExitEpoch(), postState.ValidatorSet().Get(i).ExitEpoch()) + } + if other.ValidatorSet().Get(i).WithdrawableEpoch() != postState.ValidatorSet().Get(i).WithdrawableEpoch() { + fmt.Println("difference in withdrawable", i, other.ValidatorSet().Get(i).WithdrawableEpoch(), postState.ValidatorSet().Get(i).WithdrawableEpoch()) + } + } otherRoot, err := other.HashSSZ() require.NoError(t, err) require.Equal(t, postRoot, otherRoot) diff --git a/cl/beacon/handler/utils_test.go b/cl/beacon/handler/utils_test.go index 83130f74b31..6011154cbff 100644 --- a/cl/beacon/handler/utils_test.go +++ b/cl/beacon/handler/utils_test.go @@ -78,10 +78,10 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() - a := antiquary.NewAntiquary(ctx, nil, preState, vt, &bcfg, datadir.New("/tmp"), nil, db, nil, reader, logger, true, true, false, false, nil) + a := antiquary.NewAntiquary(ctx, nil, preState, vt, &bcfg, datadir.New("/tmp"), nil, db, nil, nil, reader, logger, true, true, false, false, nil) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) // historical states reader below - statesReader := historical_states_reader.NewHistoricalStatesReader(&bcfg, reader, vt, preState) + statesReader := historical_states_reader.NewHistoricalStatesReader(&bcfg, reader, vt, preState, nil) opPool = pool.NewOperationsPool(&bcfg) fcu.Pool = opPool @@ -176,6 +176,7 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge proposerSlashingService, nil, mockValidatorMonitor, + nil, false, ) // TODO: add tests h.Init() diff --git 
a/cl/beacon/handler/validator_test.go b/cl/beacon/handler/validator_test.go index 53b904ead38..181cc9b9482 100644 --- a/cl/beacon/handler/validator_test.go +++ b/cl/beacon/handler/validator_test.go @@ -75,6 +75,7 @@ func (t *validatorTestSuite) SetupTest() { nil, nil, nil, + nil, false, ) t.gomockCtrl = gomockCtrl diff --git a/cl/beacon/handler/validators.go b/cl/beacon/handler/validators.go index 598501f8320..e9952d006f1 100644 --- a/cl/beacon/handler/validators.go +++ b/cl/beacon/handler/validators.go @@ -338,8 +338,13 @@ func (a *ApiHandler) writeValidatorsResponse( } stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + getter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + if *slot < a.forkchoiceStore.LowestAvailableSlot() { - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, *slot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, getter, *slot) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -347,7 +352,7 @@ func (a *ApiHandler) writeValidatorsResponse( http.Error(w, fmt.Errorf("state not found for slot %v", *slot).Error(), http.StatusNotFound) return } - balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + balances, err := a.stateReader.ReadValidatorsBalances(tx, getter, *slot) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -454,6 +459,11 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt return nil, err } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + getter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. 
var ( resp *beaconhttp.BeaconResponse @@ -483,14 +493,14 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch if *slot < a.forkchoiceStore.LowestAvailableSlot() { - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, *slot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, getter, *slot) if err != nil { return nil, err } if validatorSet == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validators not found")) } - balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + balances, err := a.stateReader.ReadValidatorsBalances(tx, getter, *slot) if err != nil { return nil, err } @@ -603,8 +613,13 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr return } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + getter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + if *slot < a.forkchoiceStore.LowestAvailableSlot() { - balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + balances, err := a.stateReader.ReadValidatorsBalances(tx, getter, *slot) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/cl/persistence/state/historical_states_reader/attesting_indicies.go b/cl/persistence/state/historical_states_reader/attesting_indicies.go index f0ef90b286c..9da5eb8be79 100644 --- a/cl/persistence/state/historical_states_reader/attesting_indicies.go +++ b/cl/persistence/state/historical_states_reader/attesting_indicies.go @@ -19,13 +19,11 @@ package historical_states_reader import ( "errors" "fmt" - "time" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes/solid" - "github.com/erigontech/erigon/cl/monitor/shuffling_metrics" "github.com/erigontech/erigon/cl/persistence/base_encoding" state_accessors "github.com/erigontech/erigon/cl/persistence/state" "github.com/erigontech/erigon/cl/phase1/core/state/shuffling" @@ -104,19 +102,9 @@ func (r *HistoricalStatesReader) ComputeCommittee(mix libcommon.Hash, indicies [ start := (lenIndicies * index) / count end := (lenIndicies * (index + 1)) / count var shuffledIndicies []uint64 - epoch := slot / cfg.SlotsPerEpoch - /* - mixPosition := (epoch + cfg.EpochsPerHistoricalVector - cfg.MinSeedLookahead - 1) % cfg.EpochsPerHistoricalVector - */ - if shuffledIndicesInterface, ok := r.shuffledSetsCache.Get(epoch); ok { - shuffledIndicies = shuffledIndicesInterface - } else { - shuffledIndicies = make([]uint64, lenIndicies) - start := time.Now() - shuffledIndicies = shuffling.ComputeShuffledIndicies(cfg, mix, shuffledIndicies, indicies, slot) - shuffling_metrics.ObserveComputeShuffledIndiciesTime(start) - r.shuffledSetsCache.Add(epoch, shuffledIndicies) - } + + shuffledIndicies = make([]uint64, lenIndicies) + shuffledIndicies = shuffling.ComputeShuffledIndicies(cfg, mix, shuffledIndicies, indicies, slot) return shuffledIndicies[start:end], nil } @@ -132,7 +120,7 @@ func committeeCount(cfg *clparams.BeaconChainConfig, epoch uint64, idxs []uint64 return committeCount } -func (r *HistoricalStatesReader) readHistoricalBlockRoot(tx kv.Tx, slot, index uint64) (libcommon.Hash, error) { +func (r *HistoricalStatesReader) readHistoricalBlockRoot(kvGetter state_accessors.GetValFn, slot, index uint64) (libcommon.Hash, error) { slotSubIndex := slot % r.cfg.SlotsPerHistoricalRoot 
needFromGenesis := true @@ -152,7 +140,7 @@ func (r *HistoricalStatesReader) readHistoricalBlockRoot(tx kv.Tx, slot, index u if needFromGenesis { return r.genesisState.GetBlockRootAtSlot(slot) } - br, err := tx.GetOne(kv.BlockRoot, base_encoding.Encode64ToBytes4(slotLookup)) + br, err := kvGetter(kv.BlockRoot, base_encoding.Encode64ToBytes4(slotLookup)) if err != nil { return libcommon.Hash{}, err } @@ -163,8 +151,9 @@ func (r *HistoricalStatesReader) readHistoricalBlockRoot(tx kv.Tx, slot, index u } -func (r *HistoricalStatesReader) getAttestationParticipationFlagIndicies(tx kv.Tx, version clparams.StateVersion, stateSlot uint64, data solid.AttestationData, inclusionDelay uint64, skipAssert bool) ([]uint8, error) { - currentCheckpoint, previousCheckpoint, _, ok, err := state_accessors.ReadCheckpoints(tx, r.cfg.RoundSlotToEpoch(stateSlot)) +func (r *HistoricalStatesReader) getAttestationParticipationFlagIndicies(tx kv.Tx, getter state_accessors.GetValFn, version clparams.StateVersion, stateSlot uint64, data solid.AttestationData, inclusionDelay uint64, skipAssert bool) ([]uint8, error) { + + currentCheckpoint, previousCheckpoint, _, ok, err := state_accessors.ReadCheckpoints(getter, r.cfg.RoundSlotToEpoch(stateSlot)) if err != nil { return nil, err } @@ -186,13 +175,13 @@ func (r *HistoricalStatesReader) getAttestationParticipationFlagIndicies(tx kv.T return nil, errors.New("GetAttestationParticipationFlagIndicies: source does not match.") } i := (data.Target.Epoch * r.cfg.SlotsPerEpoch) % r.cfg.SlotsPerHistoricalRoot - targetRoot, err := r.readHistoricalBlockRoot(tx, stateSlot, i) + targetRoot, err := r.readHistoricalBlockRoot(getter, stateSlot, i) if err != nil { return nil, err } i = data.Slot % r.cfg.SlotsPerHistoricalRoot - headRoot, err := r.readHistoricalBlockRoot(tx, stateSlot, i) + headRoot, err := r.readHistoricalBlockRoot(getter, stateSlot, i) if err != nil { return nil, err } diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader.go b/cl/persistence/state/historical_states_reader/historical_states_reader.go index d1cd90c670c..6bc33e3b005 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader.go @@ -33,7 +33,7 @@ import ( "github.com/erigontech/erigon/cl/persistence/base_encoding" state_accessors "github.com/erigontech/erigon/cl/persistence/state" "github.com/erigontech/erigon/cl/phase1/core/state" - "github.com/erigontech/erigon/cl/phase1/core/state/lru" + "github.com/erigontech/erigon/turbo/snapshotsync" "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" "github.com/klauspost/compress/zstd" ) @@ -46,39 +46,43 @@ type HistoricalStatesReader struct { cfg *clparams.BeaconChainConfig validatorTable *state_accessors.StaticValidatorTable // We can save 80% of the I/O by caching the validator table blockReader freezeblocks.BeaconSnapshotReader + stateSn *snapshotsync.CaplinStateSnapshots genesisState *state.CachingBeaconState - - // cache for shuffled sets - shuffledSetsCache *lru.Cache[uint64, []uint64] } func NewHistoricalStatesReader( cfg *clparams.BeaconChainConfig, blockReader freezeblocks.BeaconSnapshotReader, validatorTable *state_accessors.StaticValidatorTable, - genesisState *state.CachingBeaconState) *HistoricalStatesReader { - - cache, err := lru.New[uint64, []uint64]("shuffledSetsCache_reader", 125) - if err != nil { - panic(err) - } + genesisState *state.CachingBeaconState, stateSn *snapshotsync.CaplinStateSnapshots) 
*HistoricalStatesReader { return &HistoricalStatesReader{ - cfg: cfg, - blockReader: blockReader, - genesisState: genesisState, - validatorTable: validatorTable, - shuffledSetsCache: cache, + cfg: cfg, + blockReader: blockReader, + genesisState: genesisState, + validatorTable: validatorTable, + stateSn: stateSn, } } func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv.Tx, slot uint64) (*state.CachingBeaconState, error) { + snapshotView := r.stateSn.View() + defer snapshotView.Close() + + kvGetter := state_accessors.GetValFnTxAndSnapshot(tx, snapshotView) + ret := state.New(r.cfg) latestProcessedState, err := state_accessors.GetStateProcessingProgress(tx) if err != nil { return nil, err } + var blocksAvailableInSnapshots uint64 + if r.stateSn != nil { + blocksAvailableInSnapshots = r.stateSn.BlocksAvailable() + } + latestProcessedState = max(latestProcessedState, blocksAvailableInSnapshots) + // If this happens, we need to update our static tables if slot > latestProcessedState || slot > r.validatorTable.Slot() { log.Warn("slot is ahead of the latest processed state", "slot", slot, "latestProcessedState", latestProcessedState, "validatorTableSlot", r.validatorTable.Slot()) @@ -100,7 +104,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. blockHeader := block.SignedBeaconBlockHeader().Header blockHeader.Root = common.Hash{} // Read the epoch and per-slot data. - slotData, err := state_accessors.ReadSlotData(tx, slot) + slotData, err := state_accessors.ReadSlotData(kvGetter, slot) if err != nil { return nil, err } @@ -110,7 +114,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. } roundedSlot := r.cfg.RoundSlotToEpoch(slot) - epochData, err := state_accessors.ReadEpochData(tx, roundedSlot) + epochData, err := state_accessors.ReadEpochData(kvGetter, roundedSlot) if err != nil { return nil, fmt.Errorf("failed to read epoch data: %w", err) } @@ -129,12 +133,12 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. stateRoots, blockRoots := solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)), solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)) ret.SetLatestBlockHeader(blockHeader) - if err := r.readHistoryHashVector(tx, r.genesisState.BlockRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.BlockRoot, blockRoots); err != nil { + if err := r.readHistoryHashVector(tx, kvGetter, r.genesisState.BlockRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.BlockRoot, blockRoots); err != nil { return nil, fmt.Errorf("failed to read block roots: %w", err) } ret.SetBlockRoots(blockRoots) - if err := r.readHistoryHashVector(tx, r.genesisState.StateRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.StateRoot, stateRoots); err != nil { + if err := r.readHistoryHashVector(tx, kvGetter, r.genesisState.StateRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.StateRoot, stateRoots); err != nil { return nil, fmt.Errorf("failed to read state roots: %w", err) } ret.SetStateRoots(stateRoots) @@ -150,14 +154,14 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
// Eth1 eth1DataVotes := solid.NewStaticListSSZ[*cltypes.Eth1Data](int(r.cfg.Eth1DataVotesLength()), 72) - if err := r.readEth1DataVotes(tx, slotData.Eth1DataLength, slot, eth1DataVotes); err != nil { + if err := r.readEth1DataVotes(kvGetter, slotData.Eth1DataLength, slot, eth1DataVotes); err != nil { return nil, fmt.Errorf("failed to read eth1 data votes: %w", err) } ret.SetEth1DataVotes(eth1DataVotes) ret.SetEth1Data(slotData.Eth1Data) ret.SetEth1DepositIndex(slotData.Eth1DepositIndex) // Registry (Validators + Balances) - balancesBytes, err := r.reconstructBalances(tx, slotData.ValidatorLength, slot, kv.ValidatorBalance, kv.BalancesDump) + balancesBytes, err := r.reconstructBalances(tx, kvGetter, slotData.ValidatorLength, slot, kv.ValidatorBalance, kv.BalancesDump) if err != nil { return nil, fmt.Errorf("failed to read validator balances: %w", err) } @@ -168,27 +172,27 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. ret.SetBalances(balances) - validatorSet, err := r.ReadValidatorsForHistoricalState(tx, slot) + validatorSet, err := r.ReadValidatorsForHistoricalState(tx, kvGetter, slot) if err != nil { return nil, fmt.Errorf("failed to read validators: %w", err) } ret.SetValidators(validatorSet) // Randomness randaoMixes := solid.NewHashVector(int(r.cfg.EpochsPerHistoricalVector)) - if err := r.readRandaoMixes(tx, slot, randaoMixes); err != nil { + if err := r.readRandaoMixes(tx, kvGetter, slot, randaoMixes); err != nil { return nil, fmt.Errorf("failed to read randao mixes: %w", err) } ret.SetRandaoMixes(randaoMixes) slashingsVector := solid.NewUint64VectorSSZ(int(r.cfg.EpochsPerSlashingsVector)) // Slashings - err = r.ReconstructUint64ListDump(tx, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector) + err = r.ReconstructUint64ListDump(kvGetter, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector) if err != nil { return nil, fmt.Errorf("failed to read slashings: %w", err) } ret.SetSlashings(slashingsVector) // Finality - currentCheckpoint, previousCheckpoint, finalizedCheckpoint, ok, err := state_accessors.ReadCheckpoints(tx, roundedSlot) + currentCheckpoint, previousCheckpoint, finalizedCheckpoint, ok, err := state_accessors.ReadCheckpoints(kvGetter, roundedSlot) if err != nil { return nil, fmt.Errorf("failed to read checkpoints: %w", err) } @@ -211,7 +215,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. ret.SetCurrentEpochAttestations(currentAtts) ret.SetPreviousEpochAttestations(previousAtts) } else { - currentIdxs, previousIdxs, err := r.ReadParticipations(tx, slot) + currentIdxs, previousIdxs, err := r.ReadParticipations(tx, kvGetter, slot) if err != nil { return nil, fmt.Errorf("failed to read participations: %w", err) } @@ -224,7 +228,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. } inactivityScores := solid.NewUint64ListSSZ(int(r.cfg.ValidatorRegistryLimit)) // Inactivity - err = r.ReconstructUint64ListDump(tx, slot, kv.InactivityScores, int(slotData.ValidatorLength), inactivityScores) + err = r.ReconstructUint64ListDump(kvGetter, slot, kv.InactivityScores, int(slotData.ValidatorLength), inactivityScores) if err != nil { return nil, fmt.Errorf("failed to read inactivity scores: %w", err) } @@ -232,7 +236,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
ret.SetInactivityScoresRaw(inactivityScores) // Sync syncCommitteeSlot := r.cfg.RoundSlotToSyncCommitteePeriod(slot) - currentSyncCommittee, err := state_accessors.ReadCurrentSyncCommittee(tx, syncCommitteeSlot) + currentSyncCommittee, err := state_accessors.ReadCurrentSyncCommittee(kvGetter, syncCommitteeSlot) if err != nil { return nil, fmt.Errorf("failed to read current sync committee: %w", err) } @@ -240,7 +244,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. currentSyncCommittee = r.genesisState.CurrentSyncCommittee() } - nextSyncCommittee, err := state_accessors.ReadNextSyncCommittee(tx, syncCommitteeSlot) + nextSyncCommittee, err := state_accessors.ReadNextSyncCommittee(kvGetter, syncCommitteeSlot) if err != nil { return nil, fmt.Errorf("failed to read next sync committee: %w", err) } @@ -277,30 +281,36 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. return ret, nil } -func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, genesisVector solid.HashVectorSSZ, slot, size uint64, table string, out solid.HashVectorSSZ) (err error) { +func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, kvGetter state_accessors.GetValFn, genesisVector solid.HashVectorSSZ, slot, size uint64, table string, out solid.HashVectorSSZ) (err error) { var needFromGenesis, inserted uint64 if size > slot || slot-size <= r.genesisState.Slot() { needFromGenesis = size - (slot - r.genesisState.Slot()) } needFromDB := size - needFromGenesis - cursor, err := tx.Cursor(table) + highestAvaiableSlot, err := r.highestSlotInSnapshotsAndDB(tx, table) if err != nil { return err } - defer cursor.Close() + var currKeySlot uint64 - for k, v, err := cursor.Seek(base_encoding.Encode64ToBytes4(slot - needFromDB)); err == nil && k != nil; k, v, err = cursor.Next() { + for i := slot - needFromDB; i <= highestAvaiableSlot; i++ { + key := base_encoding.Encode64ToBytes4(i) + v, err := kvGetter(table, key) + if err != nil { + return err + } if len(v) != 32 { - return fmt.Errorf("invalid key %x", k) + return fmt.Errorf("invalid key %x", key) } - currKeySlot = base_encoding.Decode64FromBytes4(k) + currKeySlot = i out.Set(int(currKeySlot%size), common.BytesToHash(v)) inserted++ if inserted == needFromDB { break } } + for i := 0; i < int(needFromGenesis); i++ { currKeySlot++ out.Set(int(currKeySlot%size), genesisVector.Get(int(currKeySlot%size))) @@ -308,18 +318,8 @@ func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, genesisVector s return nil } -func (r *HistoricalStatesReader) readEth1DataVotes(tx kv.Tx, eth1DataVotesLength, slot uint64, out *solid.ListSSZ[*cltypes.Eth1Data]) error { +func (r *HistoricalStatesReader) readEth1DataVotes(kvGetter state_accessors.GetValFn, eth1DataVotesLength, slot uint64, out *solid.ListSSZ[*cltypes.Eth1Data]) error { initialSlot := r.cfg.RoundSlotToVotePeriod(slot) - initialKey := base_encoding.Encode64ToBytes4(initialSlot) - cursor, err := tx.Cursor(kv.Eth1DataVotes) - if err != nil { - return err - } - defer cursor.Close() - k, v, err := cursor.Seek(initialKey) - if err != nil { - return err - } if initialSlot <= r.genesisState.Slot() { // We need to prepend the genesis votes for i := 0; i < r.genesisState.Eth1DataVotes().Len(); i++ { @@ -329,24 +329,53 @@ func (r *HistoricalStatesReader) readEth1DataVotes(tx kv.Tx, eth1DataVotesLength endSlot := r.cfg.RoundSlotToVotePeriod(slot + r.cfg.SlotsPerEpoch*r.cfg.EpochsPerEth1VotingPeriod) - for k != nil && base_encoding.Decode64FromBytes4(k) < endSlot { + 
for i := initialSlot; i < endSlot; i++ { if out.Len() >= int(eth1DataVotesLength) { break } + key := base_encoding.Encode64ToBytes4(i) + v, err := kvGetter(kv.Eth1DataVotes, key) + if err != nil { + return err + } + if len(v) == 0 { + continue + } eth1Data := &cltypes.Eth1Data{} if err := eth1Data.DecodeSSZ(v, 0); err != nil { return err } out.Append(eth1Data) - k, v, err = cursor.Next() - if err != nil { - return err - } } + return nil } -func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, slot uint64, out solid.HashVectorSSZ) error { +func (r *HistoricalStatesReader) highestSlotInSnapshotsAndDB(tx kv.Tx, tbl string) (uint64, error) { + cursor, err := tx.Cursor(tbl) + if err != nil { + return 0, err + } + defer cursor.Close() + k, _, err := cursor.Last() + if err != nil { + return 0, err + } + if k == nil { + if r.stateSn != nil { + return r.stateSn.BlocksAvailable(), nil + } + return 0, nil + } + avaiableInDB := base_encoding.Decode64FromBytes4(k) + var availableInSnapshots uint64 + if r.stateSn != nil { + availableInSnapshots = r.stateSn.BlocksAvailable() + } + return max(avaiableInDB, availableInSnapshots), nil +} + +func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, kvGetter state_accessors.GetValFn, slot uint64, out solid.HashVectorSSZ) error { size := r.cfg.EpochsPerHistoricalVector genesisVector := r.genesisState.RandaoMixes() var needFromGenesis, inserted uint64 @@ -358,17 +387,26 @@ func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, slot uint64, out soli } needFromDB := size - needFromGenesis - cursor, err := tx.Cursor(kv.RandaoMixes) + + highestAvaiableSlot, err := r.highestSlotInSnapshotsAndDB(tx, kv.RandaoMixes) if err != nil { return err } - defer cursor.Close() var currKeyEpoch uint64 - for k, v, err := cursor.Seek(base_encoding.Encode64ToBytes4(roundedSlot - (needFromDB)*r.cfg.SlotsPerEpoch)); err == nil && k != nil; k, v, err = cursor.Next() { + + for i := roundedSlot - (needFromDB)*r.cfg.SlotsPerEpoch; i <= highestAvaiableSlot; i++ { + key := base_encoding.Encode64ToBytes4(i) + v, err := kvGetter(kv.RandaoMixes, key) + if err != nil { + return err + } + if len(v) == 0 { + continue + } if len(v) != 32 { - return fmt.Errorf("invalid key %x", k) + return fmt.Errorf("invalid key %x", key) } - currKeyEpoch = base_encoding.Decode64FromBytes4(k) / r.cfg.SlotsPerEpoch + currKeyEpoch = i / r.cfg.SlotsPerEpoch out.Set(int(currKeyEpoch%size), common.BytesToHash(v)) inserted++ if inserted == needFromDB { @@ -379,8 +417,9 @@ func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, slot uint64, out soli currKeyEpoch++ out.Set(int(currKeyEpoch%size), genesisVector.Get(int(currKeyEpoch%size))) } + // Now we need to read the intra epoch randao mix. 
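	// Illustrative sketch (not part of this hunk): a cursor Seek/Next scan cannot
	// span the new snapshot segments, so the loop above uses point lookups through
	// kvGetter, and the upper bound of the scan has to be computed explicitly.
	// highestSlotInSnapshotsAndDB (defined just above) returns the larger of the
	// last key in the MDBX table and stateSn.BlocksAvailable(), roughly:
	//
	//	limit, err := r.highestSlotInSnapshotsAndDB(tx, kv.RandaoMixes)
	//	if err != nil {
	//		return err
	//	}
	//	for i := firstSlot; i <= limit; i++ {
	//		v, err := kvGetter(kv.RandaoMixes, base_encoding.Encode64ToBytes4(i))
	//		if err != nil {
	//			return err
	//		}
	//		if len(v) == 0 {
	//			continue // mixes are keyed by epoch-boundary slots, so most slots are gaps
	//		}
	//		// ... fill the output vector ...
	//	}
	//
	// The intra-epoch mix for the requested slot itself is then read right below.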
- intraRandaoMix, err := tx.GetOne(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) + intraRandaoMix, err := kvGetter(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) if err != nil { return err } @@ -391,7 +430,7 @@ func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, slot uint64, out soli return nil } -func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, validatorSetLength, slot uint64, diffBucket string, dumpBucket string) ([]byte, error) { +func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, kvGetter state_accessors.GetValFn, validatorSetLength, slot uint64, diffBucket string, dumpBucket string) ([]byte, error) { // Read the file remainder := slot % clparams.SlotsPerDump freshDumpSlot := slot - remainder @@ -403,13 +442,14 @@ func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, validator return nil, err } forward := remainder <= midpoint || currentStageProgress <= freshDumpSlot+clparams.SlotsPerDump + fmt.Println("forward", forward) if forward { - compressed, err = tx.GetOne(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot)) + compressed, err = kvGetter(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot)) if err != nil { return nil, err } } else { - compressed, err = tx.GetOne(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot+clparams.SlotsPerDump)) + compressed, err = kvGetter(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot+clparams.SlotsPerDump)) if err != nil { return nil, err } @@ -438,43 +478,44 @@ func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, validator return nil, err } - diffCursor, err := tx.Cursor(diffBucket) + highestSlotAvailable, err := r.highestSlotInSnapshotsAndDB(tx, diffBucket) if err != nil { return nil, err } - defer diffCursor.Close() if forward { - for k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(freshDumpSlot)); err == nil && k != nil && base_encoding.Decode64FromBytes4(k) <= slot; k, v, err = diffCursor.Next() { + for currSlot := freshDumpSlot; currSlot <= slot && currSlot <= highestSlotAvailable; currSlot++ { + key := base_encoding.Encode64ToBytes4(currSlot) + v, err := kvGetter(diffBucket, key) if err != nil { return nil, err } - if len(k) != 4 { - return nil, fmt.Errorf("invalid key %x", k) + if len(v) == 0 { + continue + } + if len(key) != 4 { + return nil, fmt.Errorf("invalid key %x", key) } - currSlot := base_encoding.Decode64FromBytes4(k) if currSlot == freshDumpSlot { continue } - if currSlot > slot { - return nil, fmt.Errorf("diff not found for slot %d", slot) - } currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, v, false) if err != nil { return nil, err } } } else { - for k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(freshDumpSlot + clparams.SlotsPerDump)); err == nil && k != nil && base_encoding.Decode64FromBytes4(k) > slot; k, v, err = diffCursor.Prev() { + for currSlot := freshDumpSlot + clparams.SlotsPerDump; currSlot > slot && currSlot > r.genesisState.Slot(); currSlot-- { + key := base_encoding.Encode64ToBytes4(currSlot) + v, err := kvGetter(diffBucket, key) if err != nil { return nil, err } - if len(k) != 4 { - return nil, fmt.Errorf("invalid key %x", k) - } - currSlot := base_encoding.Decode64FromBytes4(k) - if currSlot <= slot || currSlot > freshDumpSlot+clparams.SlotsPerDump { + if len(v) == 0 { continue } + if len(key) != 4 { + return nil, fmt.Errorf("invalid key %x", key) + } currentList, err = 
base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, v, true) if err != nil { return nil, err @@ -485,7 +526,7 @@ func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, validator return currentList, err } -func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLength, slot uint64, diffBucket, dumpBucket string) ([]byte, error) { +func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, kvGetter state_accessors.GetValFn, validatorSetLength, slot uint64, diffBucket, dumpBucket string) ([]byte, error) { remainder := slot % clparams.SlotsPerDump freshDumpSlot := slot - remainder @@ -501,12 +542,12 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt midpoint := uint64(clparams.SlotsPerDump / 2) forward := remainder <= midpoint || currentStageProgress <= freshDumpSlot+clparams.SlotsPerDump if forward { - compressed, err = tx.GetOne(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot)) + compressed, err = kvGetter(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot)) if err != nil { return nil, err } } else { - compressed, err = tx.GetOne(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot+clparams.SlotsPerDump)) + compressed, err = kvGetter(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot+clparams.SlotsPerDump)) if err != nil { return nil, err } @@ -535,7 +576,7 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt if i == freshDumpSlot { continue } - diff, err := tx.GetOne(diffBucket, base_encoding.Encode64ToBytes4(i)) + diff, err := kvGetter(diffBucket, base_encoding.Encode64ToBytes4(i)) if err != nil { return nil, err } @@ -549,7 +590,7 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt } } else { for i := freshDumpSlot + clparams.SlotsPerDump; i > roundedSlot; i -= r.cfg.SlotsPerEpoch { - diff, err := tx.GetOne(diffBucket, base_encoding.Encode64ToBytes4(i)) + diff, err := kvGetter(diffBucket, base_encoding.Encode64ToBytes4(i)) if err != nil { return nil, err } @@ -563,17 +604,12 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt } } - diffCursor, err := tx.Cursor(diffBucket) - if err != nil { - return nil, err - } - defer diffCursor.Close() if slot%r.cfg.SlotsPerEpoch == 0 { currentList = currentList[:validatorSetLength*8] return currentList, nil } - slotDiff, err := tx.GetOne(diffBucket, base_encoding.Encode64ToBytes4(slot)) + slotDiff, err := kvGetter(diffBucket, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -585,27 +621,24 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt return base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, slotDiff, false) } -func (r *HistoricalStatesReader) ReconstructUint64ListDump(tx kv.Tx, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error { - diffCursor, err := tx.Cursor(bkt) - if err != nil { - return err - } - defer diffCursor.Close() - - k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(slot)) - if err != nil { - return err - } - if k == nil { - return fmt.Errorf("diff not found for slot %d", slot) - } - keySlot := base_encoding.Decode64FromBytes4(k) - if keySlot > slot { - _, v, err = diffCursor.Prev() +func (r *HistoricalStatesReader) ReconstructUint64ListDump(kvGetter state_accessors.GetValFn, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error { + var ( + v []byte + err error + ) + // Try seeking <= to slot + 
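	// (the loop below emulates a cursor "seek to the closest key <= slot": GetValFn has
	// no ordered iteration over snapshot segments, so we walk down from the requested
	// slot towards genesis until a non-empty entry is found)
	//
	// Illustrative recap of the layout assumed by the reconstruction helpers above
	// (a sketch of what this patch relies on, not new behaviour): every
	// clparams.SlotsPerDump slots a full zstd-compressed dump of the list is written
	// to the dump bucket, and incremental diffs are written to the diff bucket
	// (possibly sparse per-slot diffs for the effective-balance list; per-epoch diffs
	// plus a final per-slot diff for balances). Reconstruction then picks the closer
	// end of the dump window:
	//
	//	remainder := slot % clparams.SlotsPerDump
	//	freshDumpSlot := slot - remainder
	//	midpoint := uint64(clparams.SlotsPerDump / 2)
	//	forward := remainder <= midpoint || nextDumpMissing // i.e. currentStageProgress <= freshDumpSlot+SlotsPerDump
	//	if forward {
	//		// start from dump(freshDumpSlot) and apply diffs up to slot
	//	} else {
	//		// start from dump(freshDumpSlot+clparams.SlotsPerDump) and revert diffs down to slot
	//	}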
for i := slot; i >= r.genesisState.Slot(); i-- { + key := base_encoding.Encode64ToBytes4(i) + v, err = kvGetter(bkt, key) if err != nil { return err } + if len(v) == 0 { + continue + } + break } + var b bytes.Buffer if _, err := b.Write(v); err != nil { return err @@ -625,9 +658,9 @@ func (r *HistoricalStatesReader) ReconstructUint64ListDump(tx kv.Tx, slot uint64 return out.DecodeSSZ(currentList, 0) } -func (r *HistoricalStatesReader) ReadValidatorsForHistoricalState(tx kv.Tx, slot uint64) (*solid.ValidatorSet, error) { +func (r *HistoricalStatesReader) ReadValidatorsForHistoricalState(tx kv.Tx, kvGetter state_accessors.GetValFn, slot uint64) (*solid.ValidatorSet, error) { // Read the minimal beacon state which have the small fields. - sd, err := state_accessors.ReadSlotData(tx, slot) + sd, err := state_accessors.ReadSlotData(kvGetter, slot) if err != nil { return nil, err } @@ -648,7 +681,7 @@ func (r *HistoricalStatesReader) ReadValidatorsForHistoricalState(tx kv.Tx, slot }) // Read the balances - bytesEffectiveBalances, err := r.reconstructDiffedUint64List(tx, validatorSetLength, slot, kv.ValidatorEffectiveBalance, kv.EffectiveBalancesDump) + bytesEffectiveBalances, err := r.reconstructDiffedUint64List(tx, kvGetter, validatorSetLength, slot, kv.ValidatorEffectiveBalance, kv.EffectiveBalancesDump) if err != nil { return nil, err } @@ -711,12 +744,12 @@ func (r *HistoricalStatesReader) readPendingEpochs(tx kv.Tx, slot uint64) (*soli } // readParticipations shuffles active indicies and returns the participation flags for the given epoch. -func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, slot uint64) (*solid.ParticipationBitList, *solid.ParticipationBitList, error) { +func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, kvGetter state_accessors.GetValFn, slot uint64) (*solid.ParticipationBitList, *solid.ParticipationBitList, error) { var beginSlot uint64 epoch, prevEpoch := r.computeRelevantEpochs(slot) beginSlot = prevEpoch * r.cfg.SlotsPerEpoch - currentActiveIndicies, err := state_accessors.ReadActiveIndicies(tx, epoch*r.cfg.SlotsPerEpoch) + currentActiveIndicies, err := state_accessors.ReadActiveIndicies(kvGetter, epoch*r.cfg.SlotsPerEpoch) if err != nil { return nil, nil, err } @@ -724,14 +757,14 @@ func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, slot uint64) (*sol if epoch == 0 { previousActiveIndicies = currentActiveIndicies } else { - previousActiveIndicies, err = state_accessors.ReadActiveIndicies(tx, (epoch-1)*r.cfg.SlotsPerEpoch) + previousActiveIndicies, err = state_accessors.ReadActiveIndicies(kvGetter, (epoch-1)*r.cfg.SlotsPerEpoch) if err != nil { return nil, nil, err } } // Read the minimal beacon state which have the small fields. 
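	// Illustrative aside (a sketch, not part of this hunk): from here ReadParticipations
	// replays every block in [beginSlot, slot] and recomputes the committee for each
	// attestation from scratch; the shuffled-set LRU cache and the pre-warming call to
	// tryCachingEpochsInParallell are dropped by this patch. Per attestation, roughly:
	//
	//	attestationEpoch := data.Slot / r.cfg.SlotsPerEpoch
	//	mixPosition := (attestationEpoch + r.cfg.EpochsPerHistoricalVector -
	//		r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector
	//	mix, _ := r.ReadRandaoMixBySlotAndIndex(tx, kvGetter, data.Slot, mixPosition)
	//	// the mix seeds ComputeCommittee over the active index set, the resulting
	//	// committee is matched against the aggregation bits, and then:
	//	flags, _ := r.getAttestationParticipationFlagIndicies(tx, kvGetter, block.Version(), i, *data, i-data.Slot, true)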
- sd, err := state_accessors.ReadSlotData(tx, slot) + sd, err := state_accessors.ReadSlotData(kvGetter, slot) if err != nil { return nil, nil, err } @@ -746,10 +779,7 @@ func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, slot uint64) (*sol if err != nil { return nil, nil, err } - // trigger the cache for shuffled sets in parallel - if err := r.tryCachingEpochsInParallell(tx, [][]uint64{currentActiveIndicies, previousActiveIndicies}, []uint64{epoch, prevEpoch}); err != nil { - return nil, nil, err - } + // Read the previous idxs for i := beginSlot; i <= slot; i++ { // Read the block @@ -784,7 +814,7 @@ func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, slot uint64) (*sol attestationEpoch := data.Slot / r.cfg.SlotsPerEpoch mixPosition := (attestationEpoch + r.cfg.EpochsPerHistoricalVector - r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector - mix, err := r.ReadRandaoMixBySlotAndIndex(tx, data.Slot, mixPosition) + mix, err := r.ReadRandaoMixBySlotAndIndex(tx, kvGetter, data.Slot, mixPosition) if err != nil { return false } @@ -795,7 +825,7 @@ func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, slot uint64) (*sol return false } var participationFlagsIndicies []uint8 - participationFlagsIndicies, err = r.getAttestationParticipationFlagIndicies(tx, block.Version(), i, *data, i-data.Slot, true) + participationFlagsIndicies, err = r.getAttestationParticipationFlagIndicies(tx, kvGetter, block.Version(), i, *data, i-data.Slot, true) if err != nil { return false } @@ -836,12 +866,12 @@ func (r *HistoricalStatesReader) computeRelevantEpochs(slot uint64) (uint64, uin return epoch, epoch - 1 } -func (r *HistoricalStatesReader) tryCachingEpochsInParallell(tx kv.Tx, activeIdxs [][]uint64, epochs []uint64) error { +func (r *HistoricalStatesReader) tryCachingEpochsInParallell(tx kv.Tx, kvGetter state_accessors.GetValFn, activeIdxs [][]uint64, epochs []uint64) error { var wg sync.WaitGroup wg.Add(len(epochs)) for i, epoch := range epochs { mixPosition := (epoch + r.cfg.EpochsPerHistoricalVector - r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector - mix, err := r.ReadRandaoMixBySlotAndIndex(tx, epochs[0]*r.cfg.SlotsPerEpoch, mixPosition) + mix, err := r.ReadRandaoMixBySlotAndIndex(tx, kvGetter, epochs[0]*r.cfg.SlotsPerEpoch, mixPosition) if err != nil { return err } @@ -856,8 +886,8 @@ func (r *HistoricalStatesReader) tryCachingEpochsInParallell(tx kv.Tx, activeIdx return nil } -func (r *HistoricalStatesReader) ReadValidatorsBalances(tx kv.Tx, slot uint64) (solid.Uint64ListSSZ, error) { - sd, err := state_accessors.ReadSlotData(tx, slot) +func (r *HistoricalStatesReader) ReadValidatorsBalances(tx kv.Tx, kvGetter state_accessors.GetValFn, slot uint64) (solid.Uint64ListSSZ, error) { + sd, err := state_accessors.ReadSlotData(kvGetter, slot) if err != nil { return nil, err } @@ -866,7 +896,7 @@ func (r *HistoricalStatesReader) ReadValidatorsBalances(tx kv.Tx, slot uint64) ( return nil, nil } - balances, err := r.reconstructBalances(tx, sd.ValidatorLength, slot, kv.ValidatorBalance, kv.BalancesDump) + balances, err := r.reconstructBalances(tx, kvGetter, sd.ValidatorLength, slot, kv.ValidatorBalance, kv.BalancesDump) if err != nil { return nil, err } @@ -875,11 +905,11 @@ func (r *HistoricalStatesReader) ReadValidatorsBalances(tx kv.Tx, slot uint64) ( return balancesList, balancesList.DecodeSSZ(balances, 0) } -func (r *HistoricalStatesReader) ReadRandaoMixBySlotAndIndex(tx kv.Tx, slot, index uint64) (common.Hash, error) { +func (r *HistoricalStatesReader) 
ReadRandaoMixBySlotAndIndex(tx kv.Tx, kvGetter state_accessors.GetValFn, slot, index uint64) (common.Hash, error) { epoch := slot / r.cfg.SlotsPerEpoch epochSubIndex := epoch % r.cfg.EpochsPerHistoricalVector if index == epochSubIndex { - intraRandaoMix, err := tx.GetOne(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) + intraRandaoMix, err := kvGetter(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) if err != nil { return common.Hash{}, err } @@ -908,7 +938,7 @@ func (r *HistoricalStatesReader) ReadRandaoMixBySlotAndIndex(tx kv.Tx, slot, ind if needFromGenesis { return r.genesisState.GetRandaoMixes(epoch), nil } - mixBytes, err := tx.GetOne(kv.RandaoMixes, base_encoding.Encode64ToBytes4(epochLookup*r.cfg.SlotsPerEpoch)) + mixBytes, err := kvGetter(kv.RandaoMixes, base_encoding.Encode64ToBytes4(epochLookup*r.cfg.SlotsPerEpoch)) if err != nil { return common.Hash{}, err } diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go index 38151494504..290239143bd 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go @@ -41,7 +41,7 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() - a := antiquary.NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, log.New(), true, true, true, false, nil) + a := antiquary.NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, nil, reader, log.New(), true, true, true, false, nil) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) // Now lets test it against the reader tx, err := db.BeginRw(ctx) @@ -50,7 +50,7 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt vt = state_accessors.NewStaticValidatorTable() require.NoError(t, state_accessors.ReadValidatorsTable(tx, vt)) - hr := historical_states_reader.NewHistoricalStatesReader(&clparams.MainnetBeaconConfig, reader, vt, preState) + hr := historical_states_reader.NewHistoricalStatesReader(&clparams.MainnetBeaconConfig, reader, vt, preState, nil) s, err := hr.ReadHistoricalState(ctx, tx, blocks[len(blocks)-1].Block.Slot) require.NoError(t, err) diff --git a/cl/persistence/state/state_accessors.go b/cl/persistence/state/state_accessors.go index 6e9fdf7b6e6..05353687069 100644 --- a/cl/persistence/state/state_accessors.go +++ b/cl/persistence/state/state_accessors.go @@ -18,16 +18,33 @@ package state_accessors import ( "bytes" + "encoding/binary" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/persistence/base_encoding" "github.com/erigontech/erigon/cl/phase1/core/state" + "github.com/erigontech/erigon/turbo/snapshotsync" libcommon "github.com/erigontech/erigon-lib/common" ) +type GetValFn func(table string, key []byte) ([]byte, error) + +func GetValFnTxAndSnapshot(tx kv.Tx, snapshotRoTx *snapshotsync.CaplinStateView) GetValFn { + return func(table string, key []byte) ([]byte, error) { + if snapshotRoTx != nil { + slot := uint64(binary.BigEndian.Uint32(key)) + segment, ok := snapshotRoTx.VisibleSegment(slot, table) + if ok { + return segment.Get(slot) + } + } + return 
tx.GetOne(table, key) + } +} + // InitializeValidatorTable initializes the validator table in the database. func InitializeStaticTables(tx kv.RwTx, state *state.CachingBeaconState) error { var err error @@ -164,9 +181,9 @@ func SetStateProcessingProgress(tx kv.RwTx, progress uint64) error { return tx.Put(kv.StatesProcessingProgress, kv.StatesProcessingKey, base_encoding.Encode64ToBytes4(progress)) } -func ReadSlotData(tx kv.Tx, slot uint64) (*SlotData, error) { +func ReadSlotData(getFn GetValFn, slot uint64) (*SlotData, error) { sd := &SlotData{} - v, err := tx.GetOne(kv.SlotData, base_encoding.Encode64ToBytes4(slot)) + v, err := getFn(kv.SlotData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -178,9 +195,9 @@ func ReadSlotData(tx kv.Tx, slot uint64) (*SlotData, error) { return sd, sd.ReadFrom(buf) } -func ReadEpochData(tx kv.Tx, slot uint64) (*EpochData, error) { +func ReadEpochData(getFn GetValFn, slot uint64) (*EpochData, error) { ed := &EpochData{} - v, err := tx.GetOne(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) + v, err := getFn(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -193,10 +210,10 @@ func ReadEpochData(tx kv.Tx, slot uint64) (*EpochData, error) { } // ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized -func ReadCheckpoints(tx kv.Tx, slot uint64) (current solid.Checkpoint, previous solid.Checkpoint, finalized solid.Checkpoint, ok bool, err error) { +func ReadCheckpoints(getFn GetValFn, slot uint64) (current solid.Checkpoint, previous solid.Checkpoint, finalized solid.Checkpoint, ok bool, err error) { ed := &EpochData{} var v []byte - v, err = tx.GetOne(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) + v, err = getFn(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return } @@ -212,8 +229,8 @@ func ReadCheckpoints(tx kv.Tx, slot uint64) (current solid.Checkpoint, previous } // ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized -func ReadNextSyncCommittee(tx kv.Tx, slot uint64) (committee *solid.SyncCommittee, err error) { - v, err := tx.GetOne(kv.NextSyncCommittee, base_encoding.Encode64ToBytes4(slot)) +func ReadNextSyncCommittee(getFn GetValFn, slot uint64) (committee *solid.SyncCommittee, err error) { + v, err := getFn(kv.NextSyncCommittee, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -226,8 +243,8 @@ func ReadNextSyncCommittee(tx kv.Tx, slot uint64) (committee *solid.SyncCommitte } // ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized -func ReadCurrentSyncCommittee(tx kv.Tx, slot uint64) (committee *solid.SyncCommittee, err error) { - v, err := tx.GetOne(kv.CurrentSyncCommittee, base_encoding.Encode64ToBytes4(slot)) +func ReadCurrentSyncCommittee(getFn GetValFn, slot uint64) (committee *solid.SyncCommittee, err error) { + v, err := getFn(kv.CurrentSyncCommittee, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -301,9 +318,9 @@ func ReadValidatorsTable(tx kv.Tx, out *StaticValidatorTable) error { return err } -func ReadActiveIndicies(tx kv.Tx, slot uint64) ([]uint64, error) { +func ReadActiveIndicies(getFn GetValFn, slot uint64) ([]uint64, error) { key := base_encoding.Encode64ToBytes4(slot) - v, err := tx.GetOne(kv.ActiveValidatorIndicies, key) + v, err := getFn(kv.ActiveValidatorIndicies, key) if err != nil { return nil, err } diff --git a/cl/persistence/state/validator_events.go 
b/cl/persistence/state/validator_events.go index bc5066f5f5e..ec469a68baa 100644 --- a/cl/persistence/state/validator_events.go +++ b/cl/persistence/state/validator_events.go @@ -50,6 +50,10 @@ func NewStateEvents() *StateEvents { return &StateEvents{} } +func NewStateEventsFromBytes(buf []byte) *StateEvents { + return &StateEvents{buf: libcommon.Copy(buf)} +} + func (se *StateEvents) AddValidator(validatorIndex uint64, validator solid.Validator) { se.mu.Lock() defer se.mu.Unlock() diff --git a/cl/sentinel/sentinel_requests_test.go b/cl/sentinel/sentinel_requests_test.go index cfd0fa65cd4..3fe6d03050d 100644 --- a/cl/sentinel/sentinel_requests_test.go +++ b/cl/sentinel/sentinel_requests_test.go @@ -55,7 +55,7 @@ func loadChain(t *testing.T) (db kv.RwDB, blocks []*cltypes.SignedBeaconBlock, f ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() - a := antiquary.NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, log.New(), true, true, false, false, nil) + a := antiquary.NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, nil, reader, log.New(), true, true, false, false, nil) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) return } diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go index 830226c658c..ff20788cf11 100644 --- a/cmd/capcli/cli.go +++ b/cmd/capcli/cli.go @@ -27,6 +27,7 @@ import ( "net/http" "net/url" "os" + "runtime" "strconv" "strings" "time" @@ -63,6 +64,7 @@ import ( "github.com/erigontech/erigon/eth/ethconfig" "github.com/erigontech/erigon/eth/ethconfig/estimate" "github.com/erigontech/erigon/turbo/debug" + "github.com/erigontech/erigon/turbo/snapshotsync" "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" ) @@ -80,6 +82,7 @@ var CLI struct { CheckBlobsSnapshots CheckBlobsSnapshots `cmd:"" help:"check blobs snapshots"` CheckBlobsSnapshotsCount CheckBlobsSnapshotsCount `cmd:"" help:"check blobs snapshots count"` DumpBlobsSnapshotsToStore DumpBlobsSnapshotsToStore `cmd:"" help:"dump blobs snapshots to store"` + DumpStateSnapshots DumpStateSnapshots `cmd:"" help:"dump state snapshots"` } type chainCfg struct { @@ -178,7 +181,7 @@ func (c *Chain) Run(ctx *Context) error { } downloader := network.NewBackwardBeaconDownloader(ctx, beacon, nil, nil, db) - cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, nil, dirs, nil, nil, nil, nil, nil, false, false, false, false, nil), csn, db, nil, beaconConfig, true, false, true, bRoot, bs.Slot(), "/tmp", 300*time.Millisecond, nil, nil, blobStorage, log.Root()) + cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, nil, dirs, nil, nil, nil, nil, nil, nil, false, false, false, false, nil), csn, db, nil, beaconConfig, true, false, true, bRoot, bs.Slot(), "/tmp", 300*time.Millisecond, nil, nil, blobStorage, log.Root()) return stages.SpawnStageHistoryDownload(cfg, ctx, log.Root()) } @@ -534,6 +537,7 @@ func (c *LoopSnapshots) Run(ctx *Context) error { type RetrieveHistoricalState struct { chainCfg outputFolder + withPPROF CompareFile string `help:"compare file" default:""` CompareSlot uint64 `help:"compare slot" default:"0"` Out string `help:"output file" default:""` @@ -579,7 +583,17 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { return err } - hr := historical_states_reader.NewHistoricalStatesReader(beaconConfig, snr, vt, gSpot) + snTypes := 
snapshotsync.MakeCaplinStateSnapshotsTypes(db) + stateSn := snapshotsync.NewCaplinStateSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, snTypes, log.Root()) + if err := stateSn.OpenFolder(); err != nil { + return err + } + if _, err := antiquary.FillStaticValidatorsTableIfNeeded(ctx, log.Root(), stateSn, vt); err != nil { + return err + } + fmt.Println(vt.WithdrawableEpoch(0, 1)) + r.withPPROF.withProfile() + hr := historical_states_reader.NewHistoricalStatesReader(beaconConfig, snr, vt, gSpot, stateSn) start := time.Now() haveState, err := hr.ReadHistoricalState(ctx, tx, r.CompareSlot) if err != nil { @@ -635,11 +649,11 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { return err } if hRoot != wRoot { - // for i := 0; i < haveState.PreviousEpochParticipation().Length(); i++ { - // if haveState.PreviousEpochParticipation().Get(i) != wantState.PreviousEpochParticipation().Get(i) { - // log.Info("Participation mismatch", "index", i, "have", haveState.PreviousEpochParticipation().Get(i), "want", wantState.PreviousEpochParticipation().Get(i)) - // } - // } + for i := 0; i < haveState.PreviousEpochParticipation().Length(); i++ { + if haveState.BlockRoots().Get(i) != wantState.BlockRoots().Get(i) { + log.Info("block roots mismatch", "index", i, "have", haveState.BlockRoots().Get(i), "want", wantState.BlockRoots().Get(i)) + } + } return fmt.Errorf("state mismatch: got %s, want %s", libcommon.Hash(hRoot), libcommon.Hash(wRoot)) } return nil @@ -1171,3 +1185,60 @@ func (c *DumpBlobsSnapshotsToStore) Run(ctx *Context) error { return nil } + +type DumpStateSnapshots struct { + chainCfg + outputFolder + To uint64 `name:"to" help:"slot to dump"` + StepSize uint64 `name:"step-size" help:"step size" default:"10000"` +} + +func (c *DumpStateSnapshots) Run(ctx *Context) error { + _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(c.Chain) + if err != nil { + return err + } + log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler)) + log.Info("Started chain download", "chain", c.Chain) + + dirs := datadir.New(c.Datadir) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) + + db, _, err := caplin1.OpenCaplinDatabase(ctx, beaconConfig, nil, dirs.CaplinIndexing, dirs.CaplinBlobs, nil, false, 0) + if err != nil { + return err + } + var to uint64 + db.View(ctx, func(tx kv.Tx) (err error) { + if c.To == 0 { + to, err = state_accessors.GetStateProcessingProgress(tx) + return + } + to = c.To + return + }) + + salt, err := snaptype.GetIndexSalt(dirs.Snap) + + if err != nil { + return err + } + snTypes := snapshotsync.MakeCaplinStateSnapshotsTypes(db) + stateSn := snapshotsync.NewCaplinStateSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, snTypes, log.Root()) + if err := stateSn.OpenFolder(); err != nil { + return err + } + r, _ := stateSn.Get(kv.BlockRoot, 999424) + fmt.Printf("%x\n", r) + + if err := stateSn.DumpCaplinState(ctx, stateSn.BlocksAvailable(), to, c.StepSize, salt, dirs, runtime.NumCPU(), log.LvlInfo, log.Root()); err != nil { + return err + } + if err := stateSn.OpenFolder(); err != nil { + return err + } + r, _ = stateSn.Get(kv.BlockRoot, 999424) + fmt.Printf("%x\n", r) + + return nil +} diff --git a/cmd/caplin/caplin1/run.go b/cmd/caplin/caplin1/run.go index 21a122af25e..15aab3ca358 100644 --- a/cmd/caplin/caplin1/run.go +++ b/cmd/caplin/caplin1/run.go @@ -51,6 +51,7 @@ import ( "github.com/erigontech/erigon/cl/validator/validator_params" "github.com/erigontech/erigon/eth/ethconfig" 
"github.com/erigontech/erigon/params" + "github.com/erigontech/erigon/turbo/snapshotsync" "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" "github.com/spf13/afero" @@ -380,8 +381,11 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi return err } } - - antiq := antiquary.NewAntiquary(ctx, blobStorage, genesisState, vTables, beaconConfig, dirs, snDownloader, indexDB, csn, rcsn, logger, states, backfilling, blobBackfilling, config.SnapshotGenerationEnabled, snBuildSema) + stateSnapshots := snapshotsync.NewCaplinStateSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, snapshotsync.MakeCaplinStateSnapshotsTypes(indexDB), logger) + if err := stateSnapshots.OpenFolder(); err != nil { + return err + } + antiq := antiquary.NewAntiquary(ctx, blobStorage, genesisState, vTables, beaconConfig, dirs, snDownloader, indexDB, stateSnapshots, csn, rcsn, logger, states, backfilling, blobBackfilling, config.SnapshotGenerationEnabled, snBuildSema) // Create the antiquary go func() { if err := antiq.Loop(); err != nil { @@ -393,7 +397,7 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi return err } - statesReader := historical_states_reader.NewHistoricalStatesReader(beaconConfig, rcsn, vTables, genesisState) + statesReader := historical_states_reader.NewHistoricalStatesReader(beaconConfig, rcsn, vTables, genesisState, stateSnapshots) validatorParameters := validator_params.NewValidatorParams() if config.BeaconAPIRouter.Active { apiHandler := handler.NewApiHandler( @@ -428,6 +432,7 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi proposerSlashingService, option.builderClient, validatorMonitor, + stateSnapshots, true, ) go beacon.ListenAndServe(&beacon.LayeredBeaconHandler{ diff --git a/erigon-lib/common/datadir/dirs.go b/erigon-lib/common/datadir/dirs.go index 266625088f2..ca6b975552c 100644 --- a/erigon-lib/common/datadir/dirs.go +++ b/erigon-lib/common/datadir/dirs.go @@ -42,6 +42,7 @@ type Dirs struct { SnapHistory string SnapDomain string SnapAccessors string + SnapCaplin string Downloader string TxPool string Nodes string @@ -72,6 +73,7 @@ func New(datadir string) Dirs { SnapHistory: filepath.Join(datadir, "snapshots", "history"), SnapDomain: filepath.Join(datadir, "snapshots", "domain"), SnapAccessors: filepath.Join(datadir, "snapshots", "accessor"), + SnapCaplin: filepath.Join(datadir, "snapshots", "caplin"), Downloader: filepath.Join(datadir, "downloader"), TxPool: filepath.Join(datadir, "txpool"), Nodes: filepath.Join(datadir, "nodes"), @@ -82,7 +84,7 @@ func New(datadir string) Dirs { } dir.MustExist(dirs.Chaindata, dirs.Tmp, - dirs.SnapIdx, dirs.SnapHistory, dirs.SnapDomain, dirs.SnapAccessors, + dirs.SnapIdx, dirs.SnapHistory, dirs.SnapDomain, dirs.SnapAccessors, dirs.SnapCaplin, dirs.Downloader, dirs.TxPool, dirs.Nodes, dirs.CaplinBlobs, dirs.CaplinIndexing, dirs.CaplinLatest, dirs.CaplinGenesis) return dirs } diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go index 6fb5eb1cbfc..e089097d0a5 100644 --- a/erigon-lib/downloader/downloader.go +++ b/erigon-lib/downloader/downloader.go @@ -2645,14 +2645,21 @@ func SeedableFiles(dirs datadir.Dirs, chainName string, all bool) ([]string, err if err != nil { return nil, err } - var l4 []string + var l4, l5 []string if all { l4, err = seedableStateFilesBySubDir(dirs.Snap, "accessor", all) if err != nil { return nil, err } } - files = append(append(append(append(files, l1...), l2...), l3...), l4...) 
+ // check if dirs.SnapCaplin exists + if _, err := os.Stat(dirs.SnapCaplin); !os.IsNotExist(err) { + l5, err = seedableSegmentFiles(dirs.SnapCaplin, chainName, all) + if err != nil { + return nil, err + } + } + files = append(append(append(append(append(files, l1...), l2...), l3...), l4...), l5...) return files, nil } diff --git a/erigon-lib/downloader/snaptype/files.go b/erigon-lib/downloader/snaptype/files.go index 26cebf23e70..4ac92207dc2 100644 --- a/erigon-lib/downloader/snaptype/files.go +++ b/erigon-lib/downloader/snaptype/files.go @@ -152,6 +152,8 @@ func parseFileName(dir, fileName string) (res FileInfo, ok bool) { return } res.To = to * 1_000 + res.TypeString = parts[3] + res.Type, ok = ParseFileType(parts[3]) if !ok { return res, ok @@ -243,6 +245,7 @@ type FileInfo struct { From, To uint64 name, Path, Ext string Type Type + TypeString string // This is for giulio's generic snapshots } func (f FileInfo) TorrentFileExists() (bool, error) { return dir.FileExist(f.Path + ".torrent") } diff --git a/erigon-lib/downloader/snaptype/type.go b/erigon-lib/downloader/snaptype/type.go index 70dda5e6a99..a5a3244dd93 100644 --- a/erigon-lib/downloader/snaptype/type.go +++ b/erigon-lib/downloader/snaptype/type.go @@ -484,6 +484,67 @@ func BuildIndex(ctx context.Context, info FileInfo, cfg recsplit.RecSplitArgs, l } } +func BuildIndexWithSnapName(ctx context.Context, info FileInfo, cfg recsplit.RecSplitArgs, lvl log.Lvl, p *background.Progress, walker func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error, logger log.Logger) (err error) { + defer func() { + if rec := recover(); rec != nil { + err = fmt.Errorf("index panic: at=%s, %v, %s", info.Name(), rec, dbg.Stack()) + } + }() + + d, err := seg.NewDecompressor(info.Path) + if err != nil { + return fmt.Errorf("can't open %s for indexing: %w", info.Name(), err) + } + defer d.Close() + + if p != nil { + fname := info.Name() + p.Name.Store(&fname) + p.Total.Store(uint64(d.Count())) + } + cfg.KeyCount = d.Count() + cfg.IndexFile = filepath.Join(info.Dir(), strings.ReplaceAll(info.name, ".seg", ".idx")) + rs, err := recsplit.NewRecSplit(cfg, logger) + if err != nil { + return err + } + rs.LogLvl(lvl) + + defer d.EnableReadAhead().DisableReadAhead() + + for { + g := d.MakeGetter() + var i, offset, nextPos uint64 + word := make([]byte, 0, 4096) + + for g.HasNext() { + word, nextPos = g.Next(word[:0]) + if err := walker(rs, i, offset, word); err != nil { + return err + } + i++ + offset = nextPos + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + } + + if err = rs.Build(ctx); err != nil { + if errors.Is(err, recsplit.ErrCollision) { + logger.Info("Building recsplit. Collision happened. It's ok. 
Restarting with another salt...", "err", err) + rs.ResetNextSalt() + continue + } + return err + } + + return nil + } +} + func ExtractRange(ctx context.Context, f FileInfo, extractor RangeExtractor, indexBuilder IndexBuilder, firstKey FirstKeyGetter, chainDB kv.RoDB, chainConfig *chain.Config, tmpDir string, workers int, lvl log.Lvl, logger log.Logger) (uint64, error) { var lastKeyValue uint64 diff --git a/erigon-lib/downloader/util.go b/erigon-lib/downloader/util.go index 5dc141ddbca..3887820a23a 100644 --- a/erigon-lib/downloader/util.go +++ b/erigon-lib/downloader/util.go @@ -24,6 +24,7 @@ import ( "fmt" "io" "os" + "path" "path/filepath" "runtime" "strings" @@ -79,8 +80,12 @@ func seedableSegmentFiles(dir string, chainName string, skipSeedableCheck bool) res := make([]string, 0, len(files)) for _, fPath := range files { - _, name := filepath.Split(fPath) + // A bit hacky but whatever... basically caplin is incompatible with enums. + if strings.Contains(dir, "caplin") { + res = append(res, path.Join("caplin", name)) + continue + } if !skipSeedableCheck && !snaptype.IsCorrectFileName(name) { continue } diff --git a/turbo/snapshotsync/caplin_state_snapshots.go b/turbo/snapshotsync/caplin_state_snapshots.go new file mode 100644 index 00000000000..f5c75568dcc --- /dev/null +++ b/turbo/snapshotsync/caplin_state_snapshots.go @@ -0,0 +1,712 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
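// Illustrative overview (a sketch added for readability, based only on the rest of
// this patch): this file gives every Caplin state table (kv.BlockRoot, kv.SlotData,
// kv.RandaoMixes, ...) its own slot-keyed .seg segments under
// <datadir>/snapshots/caplin, plus a recsplit .idx per segment. From a caller's
// point of view the read path looks roughly like:
//
//	stateSn := snapshotsync.NewCaplinStateSnapshots(ethconfig.BlocksFreezing{}, beaconCfg, dirs,
//		snapshotsync.MakeCaplinStateSnapshotsTypes(indexDB), logger)
//	if err := stateSn.OpenFolder(); err != nil {
//		return err
//	}
//
//	view := stateSn.View()
//	defer view.Close()
//	if seg, ok := view.VisibleSegment(slot, kv.BlockRoot); ok {
//		v, err := seg.Get(slot) // raw value frozen for that slot, nil if absent
//	}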
+ +package snapshotsync + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "math" + "os" + "path/filepath" + "runtime/debug" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/tidwall/btree" + + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon-lib/recsplit" + + "github.com/erigontech/erigon-lib/chain/snapcfg" + libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/background" + "github.com/erigontech/erigon-lib/common/datadir" + "github.com/erigontech/erigon-lib/downloader/snaptype" + "github.com/erigontech/erigon-lib/kv" + "github.com/erigontech/erigon-lib/seg" + + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/persistence/base_encoding" + "github.com/erigontech/erigon/eth/ethconfig" +) + +func BeaconSimpleIdx(ctx context.Context, sn snaptype.FileInfo, salt uint32, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { + num := make([]byte, binary.MaxVarintLen64) + cfg := recsplit.RecSplitArgs{ + Enums: true, + BucketSize: 2000, + LeafSize: 8, + TmpDir: tmpDir, + Salt: &salt, + BaseDataID: sn.From, + } + if err := snaptype.BuildIndex(ctx, sn, cfg, log.LvlDebug, p, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { + if i%20_000 == 0 { + logger.Log(lvl, "Generating idx for "+sn.Type.Name(), "progress", i) + } + p.Processed.Add(1) + n := binary.PutUvarint(num, i) + if err := idx.AddKey(num[:n], offset); err != nil { + return err + } + return nil + }, logger); err != nil { + return fmt.Errorf("idx: %w", err) + } + + return nil +} + +func getKvGetterForStateTable(db kv.RoDB, tableName string) KeyValueGetter { + return func(numId uint64) ([]byte, []byte, error) { + var key, value []byte + var err error + if err := db.View(context.TODO(), func(tx kv.Tx) error { + key = base_encoding.Encode64ToBytes4(numId) + value, err = tx.GetOne(tableName, base_encoding.Encode64ToBytes4(numId)) + value = libcommon.Copy(value) + return err + }); err != nil { + return nil, nil, err + } + return key, value, nil + } +} + +func MakeCaplinStateSnapshotsTypes(db kv.RoDB) SnapshotTypes { + return SnapshotTypes{ + KeyValueGetters: map[string]KeyValueGetter{ + kv.ValidatorEffectiveBalance: getKvGetterForStateTable(db, kv.ValidatorEffectiveBalance), + kv.ValidatorSlashings: getKvGetterForStateTable(db, kv.ValidatorSlashings), + kv.ValidatorBalance: getKvGetterForStateTable(db, kv.ValidatorBalance), + kv.StateEvents: getKvGetterForStateTable(db, kv.StateEvents), + kv.ActiveValidatorIndicies: getKvGetterForStateTable(db, kv.ActiveValidatorIndicies), + kv.StateRoot: getKvGetterForStateTable(db, kv.StateRoot), + kv.BlockRoot: getKvGetterForStateTable(db, kv.BlockRoot), + kv.SlotData: getKvGetterForStateTable(db, kv.SlotData), + kv.EpochData: getKvGetterForStateTable(db, kv.EpochData), + kv.InactivityScores: getKvGetterForStateTable(db, kv.InactivityScores), + kv.NextSyncCommittee: getKvGetterForStateTable(db, kv.NextSyncCommittee), + kv.CurrentSyncCommittee: getKvGetterForStateTable(db, kv.CurrentSyncCommittee), + kv.Eth1DataVotes: getKvGetterForStateTable(db, kv.Eth1DataVotes), + kv.IntraRandaoMixes: getKvGetterForStateTable(db, kv.IntraRandaoMixes), + kv.RandaoMixes: getKvGetterForStateTable(db, kv.RandaoMixes), + kv.Proposers: getKvGetterForStateTable(db, kv.Proposers), + kv.BalancesDump: getKvGetterForStateTable(db, kv.BalancesDump), + kv.EffectiveBalancesDump: getKvGetterForStateTable(db, kv.EffectiveBalancesDump), + }, + Compression: map[string]bool{}, 
+ } +} + +// value: chunked(ssz(SignedBeaconBlocks)) +// slot -> beacon_slot_segment_offset + +type CaplinStateSnapshots struct { + indicesReady atomic.Bool + segmentsReady atomic.Bool + + Salt uint32 + + dirtySegmentsLock sync.RWMutex + visibleSegmentsLock sync.RWMutex + + // BeaconBlocks *segments + // BlobSidecars *segments + // Segments map[string]*segments + dirtyLock sync.RWMutex // guards `dirty` field + dirty map[string]*btree.BTreeG[*DirtySegment] // ordered map `type.Enum()` -> DirtySegments + + visibleLock sync.RWMutex // guards `visible` field + visible map[string]VisibleSegments // ordered map `type.Enum()` -> VisbileSegments + + snapshotTypes SnapshotTypes + + dir string + tmpdir string + segmentsMax atomic.Uint64 // all types of .seg files are available - up to this number + idxMax atomic.Uint64 // all types of .idx files are available - up to this number + cfg ethconfig.BlocksFreezing + logger log.Logger + // allows for pruning segments - this is the min availible segment + segmentsMin atomic.Uint64 + // chain cfg + beaconCfg *clparams.BeaconChainConfig +} + +type KeyValueGetter func(numId uint64) ([]byte, []byte, error) + +type SnapshotTypes struct { + KeyValueGetters map[string]KeyValueGetter + Compression map[string]bool +} + +// NewCaplinStateSnapshots - opens all snapshots. But to simplify everything: +// - it opens snapshots only on App start and immutable after +// - all snapshots of given blocks range must exist - to make this blocks range available +// - gaps are not allowed +// - segment have [from:to) semantic +func NewCaplinStateSnapshots(cfg ethconfig.BlocksFreezing, beaconCfg *clparams.BeaconChainConfig, dirs datadir.Dirs, snapshotTypes SnapshotTypes, logger log.Logger) *CaplinStateSnapshots { + // BeaconBlocks := &segments{ + // DirtySegments: btree.NewBTreeGOptions[*DirtySegment](DirtySegmentLess, btree.Options{Degree: 128, NoLocks: false}), + // } + // BlobSidecars := &segments{ + // DirtySegments: btree.NewBTreeGOptions[*DirtySegment](DirtySegmentLess, btree.Options{Degree: 128, NoLocks: false}), + // } + // Segments := make(map[string]*segments) + // for k := range snapshotTypes.KeyValueGetters { + // Segments[k] = &segments{ + // DirtySegments: btree.NewBTreeGOptions[*DirtySegment](DirtySegmentLess, btree.Options{Degree: 128, NoLocks: false}), + // } + // } + dirty := make(map[string]*btree.BTreeG[*DirtySegment]) + for k := range snapshotTypes.KeyValueGetters { + dirty[k] = btree.NewBTreeGOptions[*DirtySegment](DirtySegmentLess, btree.Options{Degree: 128, NoLocks: false}) + } + visible := make(map[string]VisibleSegments) + for k := range snapshotTypes.KeyValueGetters { + visible[k] = make(VisibleSegments, 0) + } + c := &CaplinStateSnapshots{snapshotTypes: snapshotTypes, dir: dirs.SnapCaplin, tmpdir: dirs.Tmp, cfg: cfg, visible: visible, dirty: dirty, logger: logger, beaconCfg: beaconCfg} + c.recalcVisibleFiles() + return c +} + +func (s *CaplinStateSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } +func (s *CaplinStateSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } + +func (s *CaplinStateSnapshots) LogStat(str string) { + s.logger.Info(fmt.Sprintf("[snapshots:%s] Stat", str), + "blocks", libcommon.PrettyCounter(s.SegmentsMax()+1), "indices", libcommon.PrettyCounter(s.IndicesMax()+1)) +} + +func (s *CaplinStateSnapshots) LS() { + if s == nil { + return + } + view := s.View() + defer view.Close() + + for _, roTx := range view.roTxs { + if roTx != nil { + for _, seg := range roTx.Segments { + s.logger.Info("[agg] ", "f", 
seg.src.filePath, "words", seg.src.Decompressor.Count()) + } + } + } +} + +func (s *CaplinStateSnapshots) SegFileNames(from, to uint64) []string { + view := s.View() + defer view.Close() + + var res []string + + for _, roTx := range view.roTxs { + if roTx == nil { + continue + } + for _, seg := range roTx.Segments { + if seg.from >= to || seg.to <= from { + continue + } + res = append(res, seg.src.filePath) + } + + } + return res +} + +func (s *CaplinStateSnapshots) BlocksAvailable() uint64 { + return min(s.segmentsMax.Load(), s.idxMax.Load()) +} + +func (s *CaplinStateSnapshots) Close() { + if s == nil { + return + } + s.dirtySegmentsLock.Lock() + defer s.dirtySegmentsLock.Unlock() + + s.closeWhatNotInList(nil) +} + +func (s *CaplinStateSnapshots) openSegIfNeed(sn *DirtySegment, filepath string) error { + if sn.Decompressor != nil { + return nil + } + var err error + sn.Decompressor, err = seg.NewDecompressor(filepath) + if err != nil { + return fmt.Errorf("%w, fileName: %s", err, filepath) + } + return nil +} + +// OpenList stops on optimistic=false, continue opening files on optimistic=true +func (s *CaplinStateSnapshots) OpenList(fileNames []string, optimistic bool) error { + defer s.recalcVisibleFiles() + + s.dirtySegmentsLock.Lock() + defer s.dirtySegmentsLock.Unlock() + + s.closeWhatNotInList(fileNames) + var segmentsMax uint64 + var segmentsMaxSet bool +Loop: + for _, fName := range fileNames { + f, _, _ := snaptype.ParseFileName(s.dir, fName) + + var processed bool = true + var exists bool + var sn *DirtySegment + + dirtySegments, ok := s.dirty[f.TypeString] + if !ok { + continue + } + filePath := filepath.Join(s.dir, fName) + dirtySegments.Walk(func(segments []*DirtySegment) bool { + for _, sn2 := range segments { + if sn2.Decompressor == nil { // it's ok if some segment was not able to open + continue + } + if filePath == sn2.filePath { + sn = sn2 + exists = true + break + } + } + return true + }) + if !exists { + sn = &DirtySegment{ + // segType: f.Type, Unsupported + version: f.Version, + Range: Range{f.From, f.To}, + frozen: snapcfg.IsFrozen(s.cfg.ChainName, f), + filePath: filePath, + } + } + if err := s.openSegIfNeed(sn, filePath); err != nil { + if errors.Is(err, os.ErrNotExist) { + if optimistic { + continue Loop + } else { + break Loop + } + } + if optimistic { + s.logger.Warn("[snapshots] open segment", "err", err) + continue Loop + } else { + return err + } + } + + if !exists { + // it's possible to iterate over .seg file even if you don't have index + // then make segment available even if index open may fail + dirtySegments.Set(sn) + } + if err := openIdxForCaplinStateIfNeeded(sn, filePath, optimistic); err != nil { + return err + } + // Only bob sidecars count for progression + if processed { + if f.To > 0 { + segmentsMax = f.To - 1 + } else { + segmentsMax = 0 + } + segmentsMaxSet = true + } + } + + if segmentsMaxSet { + s.segmentsMax.Store(segmentsMax) + } + s.segmentsReady.Store(true) + return nil +} + +func openIdxForCaplinStateIfNeeded(s *DirtySegment, filePath string, optimistic bool) error { + if s.Decompressor == nil { + return nil + } + err := openIdxIfNeedForCaplinState(s, filePath) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + if optimistic { + log.Warn("[snapshots] open index", "err", err) + } else { + return err + } + } + } + + return nil +} + +func openIdxIfNeedForCaplinState(s *DirtySegment, filePath string) (err error) { + s.indexes = make([]*recsplit.Index, 1) + if s.indexes[0] != nil { + return nil + } + + filePath = 
strings.ReplaceAll(filePath, ".seg", ".idx") + index, err := recsplit.OpenIndex(filePath) + if err != nil { + return fmt.Errorf("%w, fileName: %s", err, filePath) + } + + s.indexes[0] = index + + return nil +} + +func isIndexed(s *DirtySegment) bool { + if s.Decompressor == nil { + return false + } + + for _, idx := range s.indexes { + if idx == nil { + return false + } + } + return true +} + +func (s *CaplinStateSnapshots) recalcVisibleFiles() { + defer func() { + s.idxMax.Store(s.idxAvailability()) + s.indicesReady.Store(true) + }() + + s.visibleLock.Lock() + defer s.visibleLock.Unlock() + + getNewVisibleSegments := func(dirtySegments *btree.BTreeG[*DirtySegment]) []*VisibleSegment { + newVisibleSegments := make([]*VisibleSegment, 0, dirtySegments.Len()) + dirtySegments.Walk(func(segments []*DirtySegment) bool { + for _, sn := range segments { + if sn.canDelete.Load() { + continue + } + if !isIndexed(sn) { + continue + } + for len(newVisibleSegments) > 0 && newVisibleSegments[len(newVisibleSegments)-1].src.isSubSetOf(sn) { + newVisibleSegments[len(newVisibleSegments)-1].src = nil + newVisibleSegments = newVisibleSegments[:len(newVisibleSegments)-1] + } + newVisibleSegments = append(newVisibleSegments, &VisibleSegment{ + Range: sn.Range, + segType: sn.segType, + src: sn, + }) + } + return true + }) + return newVisibleSegments + } + + for k := range s.visible { + s.visible[k] = getNewVisibleSegments(s.dirty[k]) + } +} + +func (s *CaplinStateSnapshots) idxAvailability() uint64 { + s.visibleLock.RLock() + defer s.visibleLock.RUnlock() + + min := uint64(math.MaxUint64) + for _, segs := range s.visible { + if len(segs) == 0 { + return 0 + } + if segs[len(segs)-1].to < min { + min = segs[len(segs)-1].to + } + } + if min == math.MaxUint64 { + return 0 + } + return min +} + +func listAllSegFilesInDir(dir string) []string { + files, err := os.ReadDir(dir) + if err != nil { + panic(err) + } + list := make([]string, 0, len(files)) + for _, f := range files { + if f.IsDir() { + continue + } + // check if it's a .seg file + if filepath.Ext(f.Name()) != ".seg" { + continue + } + list = append(list, f.Name()) + } + return list +} + +func (s *CaplinStateSnapshots) OpenFolder() error { + return s.OpenList(listAllSegFilesInDir(s.dir), false) +} + +func (s *CaplinStateSnapshots) closeWhatNotInList(l []string) { + protectFiles := make(map[string]struct{}, len(l)) + for _, fName := range l { + protectFiles[fName] = struct{}{} + } + + for _, dirtySegments := range s.dirty { + toClose := make([]*DirtySegment, 0) + dirtySegments.Walk(func(segments []*DirtySegment) bool { + for _, sn := range segments { + if sn.Decompressor == nil { + continue + } + _, name := filepath.Split(sn.FilePath()) + if _, ok := protectFiles[name]; ok { + continue + } + toClose = append(toClose, sn) + } + return true + }) + for _, sn := range toClose { + sn.close() + dirtySegments.Delete(sn) + } + } +} + +type CaplinStateView struct { + s *CaplinStateSnapshots + roTxs map[string]*RoTx + closed bool +} + +func (s *CaplinStateSnapshots) View() *CaplinStateView { + if s == nil { + return nil + } + s.visibleSegmentsLock.RLock() + defer s.visibleSegmentsLock.RUnlock() + + v := &CaplinStateView{s: s, roTxs: make(map[string]*RoTx)} + // BeginRo increments refcount - which is contended + s.dirtySegmentsLock.RLock() + defer s.dirtySegmentsLock.RUnlock() + + for k, segments := range s.visible { + v.roTxs[k] = segments.BeginRo() + } + return v +} + +func (v *CaplinStateView) Close() { + if v == nil { + return + } + if v.closed { + return + } + for 
_, segments := range v.roTxs { + segments.Close() + } + v.s = nil + v.closed = true +} + +func (v *CaplinStateView) VisibleSegments(tbl string) []*VisibleSegment { + if v.s == nil || v.s.visible[tbl] == nil { + return nil + } + return v.s.visible[tbl] +} + +func (v *CaplinStateView) VisibleSegment(slot uint64, tbl string) (*VisibleSegment, bool) { + for _, seg := range v.VisibleSegments(tbl) { + if !(slot >= seg.from && slot < seg.to) { + continue + } + return seg, true + } + return nil, false +} + +func dumpCaplinState(ctx context.Context, snapName string, kvGetter KeyValueGetter, fromSlot uint64, toSlot, blocksPerFile uint64, salt uint32, dirs datadir.Dirs, workers int, lvl log.Lvl, logger log.Logger, compress bool) error { + tmpDir, snapDir := dirs.Tmp, dirs.SnapCaplin + + segName := snaptype.BeaconBlocks.FileName(0, fromSlot, toSlot) + // a little bit ugly. + segName = strings.ReplaceAll(segName, "beaconblocks", snapName) + f, _, _ := snaptype.ParseFileName(snapDir, segName) + + compressCfg := seg.DefaultCfg + compressCfg.Workers = workers + sn, err := seg.NewCompressor(ctx, "Snapshots "+snapName, f.Path, tmpDir, compressCfg, lvl, logger) + if err != nil { + return err + } + defer sn.Close() + + // Generate .seg file, which is just the list of beacon blocks. + for i := fromSlot; i < toSlot; i++ { + // read root. + _, dump, err := kvGetter(i) + if err != nil { + return err + } + if i%20_000 == 0 { + logger.Log(lvl, "Dumping "+snapName, "progress", i) + } + if compress { + if err := sn.AddWord(dump); err != nil { + return err + } + } else { + if err := sn.AddUncompressedWord(dump); err != nil { + return err + } + } + } + if sn.Count() != int(blocksPerFile) { + return fmt.Errorf("expected %d blocks, got %d", blocksPerFile, sn.Count()) + } + if err := sn.Compress(); err != nil { + return err + } + // Generate .idx file, which is the slot => offset mapping. + p := &background.Progress{} + + // Ugly hack to wait for fsync + time.Sleep(15 * time.Second) + + return simpleIdx(ctx, f, salt, tmpDir, p, lvl, logger) +} + +func simpleIdx(ctx context.Context, sn snaptype.FileInfo, salt uint32, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { + num := make([]byte, binary.MaxVarintLen64) + cfg := recsplit.RecSplitArgs{ + Enums: true, + BucketSize: 2000, + LeafSize: 8, + TmpDir: tmpDir, + Salt: &salt, + BaseDataID: sn.From, + } + if err := snaptype.BuildIndexWithSnapName(ctx, sn, cfg, log.LvlDebug, p, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { + if i%20_000 == 0 { + logger.Log(lvl, "Generating idx for "+sn.Name(), "progress", i) + } + p.Processed.Add(1) + n := binary.PutUvarint(num, i) + if err := idx.AddKey(num[:n], offset); err != nil { + return err + } + return nil + }, logger); err != nil { + return fmt.Errorf("idx: %w", err) + } + + return nil +} + +func (s *CaplinStateSnapshots) DumpCaplinState(ctx context.Context, fromSlot, toSlot, blocksPerFile uint64, salt uint32, dirs datadir.Dirs, workers int, lvl log.Lvl, logger log.Logger) error { + fromSlot = (fromSlot / blocksPerFile) * blocksPerFile + toSlot = (toSlot / blocksPerFile) * blocksPerFile + for snapName, kvGetter := range s.snapshotTypes.KeyValueGetters { + for i := fromSlot; i < toSlot; i += blocksPerFile { + if toSlot-i < blocksPerFile { + break + } + // keep beaconblocks here but whatever.... 
+ to := i + blocksPerFile + logger.Log(lvl, "Dumping "+snapName, "from", i, "to", to) + if err := dumpCaplinState(ctx, snapName, kvGetter, i, to, blocksPerFile, salt, dirs, workers, lvl, logger, s.snapshotTypes.Compression[snapName]); err != nil { + return err + } + } + } + return nil +} + +func (s *CaplinStateSnapshots) BuildMissingIndices(ctx context.Context, logger log.Logger) error { + if s == nil { + return nil + } + // if !s.segmentsReady.Load() { + // return fmt.Errorf("not all snapshot segments are available") + // } + + // wait for Downloader service to download all expected snapshots + segments, _, err := SegmentsCaplin(s.dir, 0) + if err != nil { + return err + } + noneDone := true + for index := range segments { + segment := segments[index] + // The same slot=>offset mapping is used for both beacon blocks and blob sidecars. + if segment.Type.Enum() != snaptype.CaplinEnums.BeaconBlocks && segment.Type.Enum() != snaptype.CaplinEnums.BlobSidecars { + continue + } + if segment.Type.HasIndexFiles(segment, logger) { + continue + } + p := &background.Progress{} + noneDone = false + if err := BeaconSimpleIdx(ctx, segment, s.Salt, s.tmpdir, p, log.LvlDebug, logger); err != nil { + return err + } + } + if noneDone { + return nil + } + + return s.OpenFolder() +} + +func (s *CaplinStateSnapshots) Get(tbl string, slot uint64) ([]byte, error) { + defer func() { + if rec := recover(); rec != nil { + panic(fmt.Sprintf("Get(%s, %d), %s, %s\n", tbl, slot, rec, debug.Stack())) + } + }() + + view := s.View() + defer view.Close() + + seg, ok := view.VisibleSegment(slot, tbl) + if !ok { + return nil, nil + } + + return seg.Get(slot) +} diff --git a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go index 5145e335a24..2f37fb583bd 100644 --- a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go @@ -19,7 +19,6 @@ package freezeblocks import ( "bytes" "context" - "encoding/binary" "errors" "fmt" "math" @@ -41,7 +40,6 @@ import ( "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/dbutils" "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon-lib/recsplit" "github.com/erigontech/erigon-lib/seg" "github.com/erigontech/erigon/cl/clparams" @@ -55,33 +53,6 @@ import ( var sidecarSSZSize = (&cltypes.BlobSidecar{}).EncodingSizeSSZ() -func BeaconSimpleIdx(ctx context.Context, sn snaptype.FileInfo, salt uint32, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { - num := make([]byte, binary.MaxVarintLen64) - cfg := recsplit.RecSplitArgs{ - Enums: true, - BucketSize: 2000, - LeafSize: 8, - TmpDir: tmpDir, - Salt: &salt, - BaseDataID: sn.From, - } - if err := snaptype.BuildIndex(ctx, sn, cfg, log.LvlDebug, p, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { - if i%20_000 == 0 { - logger.Log(lvl, "Generating idx for "+sn.Type.Name(), "progress", i) - } - p.Processed.Add(1) - n := binary.PutUvarint(num, i) - if err := idx.AddKey(num[:n], offset); err != nil { - return err - } - return nil - }, logger); err != nil { - return fmt.Errorf("idx: %w", err) - } - - return nil -} - // value: chunked(ssz(SignedBeaconBlocks)) // slot -> beacon_slot_segment_offset @@ -515,7 +486,7 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, fromSlot uint64, toS // Ugly hack to wait for fsync time.Sleep(15 * time.Second) - return BeaconSimpleIdx(ctx, f, salt, tmpDir, p, lvl, logger) + return 
snapshotsync.BeaconSimpleIdx(ctx, f, salt, tmpDir, p, lvl, logger) } func DumpBlobSidecarsRange(ctx context.Context, db kv.RoDB, storage blob_storage.BlobStorage, fromSlot uint64, toSlot uint64, salt uint32, dirs datadir.Dirs, workers int, blobCountFn BlobCountBySlotFn, lvl log.Lvl, logger log.Logger) error { @@ -601,7 +572,7 @@ func DumpBlobSidecarsRange(ctx context.Context, db kv.RoDB, storage blob_storage // Generate .idx file, which is the slot => offset mapping. p := &background.Progress{} - return BeaconSimpleIdx(ctx, f, salt, tmpDir, p, lvl, logger) + return snapshotsync.BeaconSimpleIdx(ctx, f, salt, tmpDir, p, lvl, logger) } func DumpBeaconBlocks(ctx context.Context, db kv.RoDB, fromSlot, toSlot uint64, salt uint32, dirs datadir.Dirs, workers int, lvl log.Lvl, logger log.Logger) error { @@ -665,7 +636,7 @@ func (s *CaplinSnapshots) BuildMissingIndices(ctx context.Context, logger log.Lo } p := &background.Progress{} noneDone = false - if err := BeaconSimpleIdx(ctx, segment, s.Salt, s.tmpdir, p, log.LvlDebug, logger); err != nil { + if err := snapshotsync.BeaconSimpleIdx(ctx, segment, s.Salt, s.tmpdir, p, log.LvlDebug, logger); err != nil { return err } } diff --git a/turbo/snapshotsync/snapshots.go b/turbo/snapshotsync/snapshots.go index da86afbc070..cc55fb8346a 100644 --- a/turbo/snapshotsync/snapshots.go +++ b/turbo/snapshotsync/snapshots.go @@ -249,6 +249,9 @@ type DirtySegment struct { refcount atomic.Int32 canDelete atomic.Bool + + // only caplin state + filePath string } func NewDirtySegment(segType snaptype.Type, version snaptype.Version, from uint64, to uint64, frozen bool) *DirtySegment { @@ -274,6 +277,28 @@ func (s *VisibleSegment) IsIndexed() bool { return s.src.IsIndexed() } +func (v *VisibleSegment) Get(globalId uint64) ([]byte, error) { + idxSlot := v.src.Index() + + if idxSlot == nil { + return nil, nil + } + blockOffset := idxSlot.OrdinalLookup(globalId - idxSlot.BaseDataID()) + + gg := v.src.MakeGetter() + gg.Reset(blockOffset) + if !gg.HasNext() { + return nil, nil + } + var buf []byte + buf, _ = gg.Next(buf) + if len(buf) == 0 { + return nil, nil + } + + return buf, nil +} + func DirtySegmentLess(i, j *DirtySegment) bool { if i.from != j.from { return i.from < j.from