diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 5eb8fa4146..899d839596 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -67,7 +67,6 @@ var (
 		utils.DirectBroadcastFlag,
 		utils.DisableSnapProtocolFlag,
 		utils.EnableTrustProtocolFlag,
-		utils.PipeCommitFlag,
 		utils.RangeLimitFlag,
 		utils.USBFlag,
 		utils.SmartCardDaemonPathFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index f3aab43d70..5c174dbee7 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -117,11 +117,6 @@ var (
 		Usage:    "Enable trust protocol",
 		Category: flags.FastNodeCategory,
 	}
-	PipeCommitFlag = &cli.BoolFlag{
-		Name:     "pipecommit",
-		Usage:    "Enable MPT pipeline commit, it will improve syncing performance. It is an experimental feature(default is false)",
-		Category: flags.DeprecatedCategory,
-	}
 	RangeLimitFlag = &cli.BoolFlag{
 		Name:     "rangelimit",
 		Usage:    "Enable 5000 blocks limit for range query",
@@ -1982,9 +1977,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	if ctx.IsSet(EnableTrustProtocolFlag.Name) {
 		cfg.EnableTrustProtocol = ctx.IsSet(EnableTrustProtocolFlag.Name)
 	}
-	if ctx.IsSet(PipeCommitFlag.Name) {
-		log.Warn("The --pipecommit flag is deprecated and could be removed in the future!")
-	}
 	if ctx.IsSet(RangeLimitFlag.Name) {
 		cfg.RangeLimit = ctx.Bool(RangeLimitFlag.Name)
 	}
diff --git a/core/block_validator.go b/core/block_validator.go
index d15e2cd786..6b292ddbe4 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -19,9 +19,7 @@ package core
 import (
 	"errors"
 	"fmt"
-	"time"

-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -29,8 +27,6 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )

-const badBlockCacheExpire = 30 * time.Second
-
 type BlockValidatorOption func(*BlockValidator) *BlockValidator

 func EnableRemoteVerifyManager(remoteValidator *remoteVerifyManager) BlockValidatorOption {
@@ -74,9 +70,6 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
 	if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
 		return ErrKnownBlock
 	}
-	if v.bc.isCachedBadBlock(block) {
-		return ErrKnownBadBlock
-	}
 	// Header validity is known at this point. Here we verify that uncles, transactions
 	// and withdrawals given in the block body match the header.
 	header := block.Header()
@@ -192,23 +185,12 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
 			return nil
 		},
 	}
-	if statedb.IsPipeCommit() {
-		validateFuns = append(validateFuns, func() error {
-			if err := statedb.WaitPipeVerification(); err != nil {
-				return err
-			}
-			statedb.CorrectAccountsRoot(common.Hash{})
-			statedb.Finalise(v.config.IsEIP158(header.Number))
-			return nil
-		})
-	} else {
-		validateFuns = append(validateFuns, func() error {
-			if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
-				return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
-			}
-			return nil
-		})
-	}
+	validateFuns = append(validateFuns, func() error {
+		if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
+			return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
+		}
+		return nil
+	})
 	validateRes := make(chan error, len(validateFuns))
 	for _, f := range validateFuns {
 		tmpFunc := f
diff --git a/core/blockchain.go b/core/blockchain.go
index b2a56d74bf..1304718a1f 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -102,11 +102,10 @@ var (
 	blockRecvTimeDiffGauge = metrics.NewRegisteredGauge("chain/block/recvtimediff", nil)

-	errStateRootVerificationFailed = errors.New("state root verification failed")
-	errInsertionInterrupted        = errors.New("insertion is interrupted")
-	errChainStopped                = errors.New("blockchain is stopped")
-	errInvalidOldChain             = errors.New("invalid old chain")
-	errInvalidNewChain             = errors.New("invalid new chain")
+	errInsertionInterrupted = errors.New("insertion is interrupted")
+	errChainStopped         = errors.New("blockchain is stopped")
+	errInvalidOldChain      = errors.New("invalid old chain")
+	errInvalidNewChain      = errors.New("invalid new chain")
 )

 const (
@@ -116,7 +115,6 @@ const (
 	receiptsCacheLimit  = 10000
 	sidecarsCacheLimit  = 1024
 	txLookupCacheLimit  = 1024
-	maxBadBlockLimit    = 16
 	maxFutureBlocks     = 256
 	maxTimeFutureBlocks = 30
 	TriesInMemory       = 128
@@ -126,8 +124,6 @@ const (
 	diffLayerFreezerRecheckInterval = 3 * time.Second
 	maxDiffForkDist                 = 11 // Maximum allowed backward distance from the chain head

-	rewindBadBlockInterval = 1 * time.Second
-
 	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
 	//
 	// Changelog:
@@ -253,7 +249,6 @@ type BlockChain struct {
 	snaps  *snapshot.Tree                   // Snapshot tree for fast trie leaf access
 	triegc *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc
 	gcproc time.Duration                    // Accumulates canonical block processing for trie dumping
-	commitLock sync.Mutex // CommitLock is used to protect above field from being modified concurrently
 	lastWrite     uint64           // Last block when the state was flushed
 	flushInterval atomic.Int64     // Time interval (processing time) after which to flush a state
 	triedb        *triedb.Database // The database handler for maintaining trie nodes.
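The net effect of the core/block_validator.go hunks above: with pipecommit gone, ValidateState no longer branches on statedb.IsPipeCommit(), and the merkle-root comparison against statedb.IntermediateRoot runs unconditionally as one of the validateFuns. Those checks are fanned out to goroutines that report into a buffered error channel. The standalone Go sketch below illustrates that fan-out idiom only; the checkGasUsed/checkReceipts/checkRoot closures are hypothetical stand-ins for the real validators, not code from this change.

package main

import (
	"errors"
	"fmt"
)

// validateAll runs each check in its own goroutine. The channel is buffered
// to len(checks), so every goroutine can deliver its result even after the
// caller has already returned on an earlier failure (no blocked senders).
func validateAll(checks []func() error) error {
	results := make(chan error, len(checks))
	for _, f := range checks {
		f := f // pin the loop variable (needed before Go 1.22)
		go func() { results <- f() }()
	}
	for range checks {
		if err := <-results; err != nil {
			return err // first failure wins
		}
	}
	return nil
}

func main() {
	// Hypothetical stand-ins for the gas-used, receipt-hash and state-root checks.
	checkGasUsed := func() error { return nil }
	checkReceipts := func() error { return nil }
	checkRoot := func() error { return errors.New("invalid merkle root") }

	if err := validateAll([]func() error{checkGasUsed, checkReceipts, checkRoot}); err != nil {
		fmt.Println("block rejected:", err)
	}
}

ValidateState keeps this same shape (tmpFunc := f feeding the buffered validateRes channel), which is why dropping the pipe-commit branch only shrinks the validateFuns slice without touching the collection loop.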
@@ -294,8 +289,6 @@ type BlockChain struct {
 	// future blocks are blocks added for later processing
 	futureBlocks *lru.Cache[common.Hash, *types.Block]
-	// Cache for the blocks that failed to pass MPT root verification
-	badBlockCache *lru.Cache[common.Hash, time.Time]
 	// trusted diff layers
 	diffLayerCache     *exlru.Cache // Cache for the diffLayers
@@ -316,7 +309,6 @@ type BlockChain struct {
 	processor  Processor // Block transaction processor interface
 	forker     *ForkChoice
 	vmConfig   vm.Config
-	pipeCommit bool

 	// monitor
 	doubleSignMonitor *monitor.DoubleSignMonitor
@@ -378,7 +370,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		blockCache:         lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
 		txLookupCache:      lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
 		futureBlocks:       lru.NewCache[common.Hash, *types.Block](maxFutureBlocks),
-		badBlockCache:      lru.NewCache[common.Hash, time.Time](maxBadBlockLimit),
 		diffLayerCache:     diffLayerCache,
 		diffLayerChanCache: diffLayerChanCache,
 		engine:             engine,
@@ -559,11 +550,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		bc.wg.Add(1)
 		go bc.trustedDiffLayerLoop()
 	}
-	if bc.pipeCommit {
-		// check current block and rewind invalid one
-		bc.wg.Add(1)
-		go bc.rewindInvalidHeaderBlockLoop()
-	}

 	if bc.doubleSignMonitor != nil {
 		bc.wg.Add(1)
@@ -817,26 +803,6 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
 	return nil
 }

-func (bc *BlockChain) tryRewindBadBlocks() {
-	if !bc.chainmu.TryLock() {
-		return
-	}
-	defer bc.chainmu.Unlock()
-	block := bc.CurrentBlock()
-	snaps := bc.snaps
-	// Verified and Result is false
-	if snaps != nil && snaps.Snapshot(block.Root) != nil &&
-		snaps.Snapshot(block.Root).Verified() && !snaps.Snapshot(block.Root).WaitAndGetVerifyRes() {
-		// Rewind by one block
-		log.Warn("current block verified failed, rewind to its parent", "height", block.Number.Uint64(), "hash", block.Hash())
-		bc.futureBlocks.Remove(block.Hash())
-		bc.badBlockCache.Add(block.Hash(), time.Now())
-		bc.diffLayerCache.Remove(block.Hash())
-		bc.reportBlock(bc.GetBlockByHash(block.Hash()), nil, errStateRootVerificationFailed)
-		bc.setHeadBeyondRoot(block.Number.Uint64()-1, 0, common.Hash{}, false)
-	}
-}
-
 // rewindHashHead implements the logic of rewindHead in the context of hash scheme.
 func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
 	var (
@@ -1812,88 +1778,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 		wg.Done()
 	}()

-	tryCommitTrieDB := func() error {
-		bc.commitLock.Lock()
-		defer bc.commitLock.Unlock()
-
-		// If node is running in path mode, skip explicit gc operation
-		// which is unnecessary in this mode.
-		if bc.triedb.Scheme() == rawdb.PathScheme {
-			return nil
-		}
-
-		triedb := bc.stateCache.TrieDB()
-		// If we're running an archive node, always flush
-		if bc.cacheConfig.TrieDirtyDisabled {
-			return triedb.Commit(block.Root(), false)
-		}
-		// Full but not archive node, do proper garbage collection
-		triedb.Reference(block.Root(), common.Hash{}) // metadata reference to keep trie alive
-		bc.triegc.Push(block.Root(), -int64(block.NumberU64()))
-
-		// Flush limits are not considered for the first TriesInMemory blocks.
-		current := block.NumberU64()
-		if current <= bc.TriesInMemory() {
-			return nil
-		}
-		// If we exceeded our memory allowance, flush matured singleton nodes to disk
-		var (
-			_, nodes, _, imgs = triedb.Size()
-			limit             = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
-		)
-		if nodes > limit || imgs > 4*1024*1024 {
-			triedb.Cap(limit - ethdb.IdealBatchSize)
-		}
-		// Find the next state trie we need to commit
-		chosen := current - bc.triesInMemory
-		flushInterval := time.Duration(bc.flushInterval.Load())
-		// If we exceeded out time allowance, flush an entire trie to disk
-		if bc.gcproc > flushInterval {
-			canWrite := true
-			if posa, ok := bc.engine.(consensus.PoSA); ok {
-				if !posa.EnoughDistance(bc, block.Header()) {
-					canWrite = false
-				}
-			}
-			if canWrite {
-				// If the header is missing (canonical chain behind), we're reorging a low
-				// diff sidechain. Suspend committing until this operation is completed.
-				header := bc.GetHeaderByNumber(chosen)
-				if header == nil {
-					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
-				} else {
-					// If we're exceeding limits but haven't reached a large enough memory gap,
-					// warn the user that the system is becoming unstable.
-					if chosen < bc.lastWrite+bc.triesInMemory && bc.gcproc >= 2*flushInterval {
-						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/float64(bc.triesInMemory))
-					}
-					// Flush an entire trie and restart the counters
-					triedb.Commit(header.Root, true)
-					rawdb.WriteSafePointBlockNumber(bc.db, chosen)
-					bc.lastWrite = chosen
-					bc.gcproc = 0
-				}
-			}
-		}
-		// Garbage collect anything below our required write retention
-		wg2 := sync.WaitGroup{}
-		for !bc.triegc.Empty() {
-			root, number := bc.triegc.Pop()
-			if uint64(-number) > chosen {
-				bc.triegc.Push(root, number)
-				break
-			}
-			wg2.Add(1)
-			go func() {
-				triedb.Dereference(root)
-				wg2.Done()
-			}()
-		}
-		wg2.Wait()
-		return nil
-	}
 	// Commit all cached state changes into underlying memory database.
-	_, diffLayer, err := state.Commit(block.NumberU64(), bc.tryRewindBadBlocks, tryCommitTrieDB)
+	_, diffLayer, err := state.Commit(block.NumberU64(), nil, nil)
 	if err != nil {
 		return err
 	}
@@ -1913,6 +1799,80 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 		go bc.cacheDiffLayer(diffLayer, diffLayerCh)
 	}
+
+	// If node is running in path mode, skip explicit gc operation
+	// which is unnecessary in this mode.
+	if bc.triedb.Scheme() == rawdb.PathScheme {
+		return nil
+	}
+
+	triedb := bc.stateCache.TrieDB()
+	// If we're running an archive node, always flush
+	if bc.cacheConfig.TrieDirtyDisabled {
+		return triedb.Commit(block.Root(), false)
+	}
+	// Full but not archive node, do proper garbage collection
+	triedb.Reference(block.Root(), common.Hash{}) // metadata reference to keep trie alive
+	bc.triegc.Push(block.Root(), -int64(block.NumberU64()))
+
+	// Flush limits are not considered for the first TriesInMemory blocks.
+	current := block.NumberU64()
+	if current <= bc.TriesInMemory() {
+		return nil
+	}
+	// If we exceeded our memory allowance, flush matured singleton nodes to disk
+	var (
+		_, nodes, _, imgs = triedb.Size()
+		limit             = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
+	)
+	if nodes > limit || imgs > 4*1024*1024 {
+		triedb.Cap(limit - ethdb.IdealBatchSize)
+	}
+	// Find the next state trie we need to commit
+	chosen := current - bc.triesInMemory
+	flushInterval := time.Duration(bc.flushInterval.Load())
+	// If we exceeded our time allowance, flush an entire trie to disk
+	if bc.gcproc > flushInterval {
+		canWrite := true
+		if posa, ok := bc.engine.(consensus.PoSA); ok {
+			if !posa.EnoughDistance(bc, block.Header()) {
+				canWrite = false
+			}
+		}
+		if canWrite {
+			// If the header is missing (canonical chain behind), we're reorging a low
+			// diff sidechain. Suspend committing until this operation is completed.
+			header := bc.GetHeaderByNumber(chosen)
+			if header == nil {
+				log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
+			} else {
+				// If we're exceeding limits but haven't reached a large enough memory gap,
+				// warn the user that the system is becoming unstable.
+				if chosen < bc.lastWrite+bc.triesInMemory && bc.gcproc >= 2*flushInterval {
+					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/float64(bc.triesInMemory))
+				}
+				// Flush an entire trie and restart the counters
+				triedb.Commit(header.Root, true)
+				rawdb.WriteSafePointBlockNumber(bc.db, chosen)
+				bc.lastWrite = chosen
+				bc.gcproc = 0
+			}
+		}
+	}
+	// Garbage collect anything below our required write retention
+	for !bc.triegc.Empty() {
+		root, number := bc.triegc.Pop()
+		if uint64(-number) > chosen {
+			bc.triegc.Push(root, number)
+			break
+		}
+		wg.Add(1)
+		go func() {
+			triedb.Dereference(root)
+			wg.Done()
+		}()
+	}
+	wg.Wait()
 	return nil
 }

@@ -2269,9 +2229,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 		}

 		// Process block using the parent state as reference point
-		if bc.pipeCommit {
-			statedb.EnablePipeCommit()
-		}
 		statedb.SetExpectedStateRoot(block.Root())
 		pstart := time.Now()
 		statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
@@ -2889,22 +2846,6 @@ func (bc *BlockChain) updateFutureBlocks() {
 	}
 }

-func (bc *BlockChain) rewindInvalidHeaderBlockLoop() {
-	recheck := time.NewTicker(rewindBadBlockInterval)
-	defer func() {
-		recheck.Stop()
-		bc.wg.Done()
-	}()
-	for {
-		select {
-		case <-recheck.C:
-			bc.tryRewindBadBlocks()
-		case <-bc.quit:
-			return
-		}
-	}
-}
-
 func (bc *BlockChain) trustedDiffLayerLoop() {
 	recheck := time.NewTicker(diffLayerFreezerRecheckInterval)
 	defer func() {
@@ -3042,17 +2983,6 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
 	return false
 }

-func (bc *BlockChain) isCachedBadBlock(block *types.Block) bool {
-	if timeAt, exist := bc.badBlockCache.Get(block.Hash()); exist {
-		if time.Since(timeAt) >= badBlockCacheExpire {
-			bc.badBlockCache.Remove(block.Hash())
-			return false
-		}
-		return true
-	}
-	return false
-}
-
 // reportBlock logs a bad block error.
 // bad block need not save receipts & sidecars.
 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
@@ -3114,11 +3044,6 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
 func (bc *BlockChain) TriesInMemory() uint64 { return bc.triesInMemory }

-func EnablePipelineCommit(bc *BlockChain) (*BlockChain, error) {
-	bc.pipeCommit = false
-	return bc, nil
-}
-
 func EnablePersistDiff(limit uint64) BlockChainOption {
 	return func(chain *BlockChain) (*BlockChain, error) {
 		chain.diffLayerFreezerBlockLimit = limit
diff --git a/core/blockchain_diff_test.go b/core/blockchain_diff_test.go
index 8ec14bce43..50facedac6 100644
--- a/core/blockchain_diff_test.go
+++ b/core/blockchain_diff_test.go
@@ -237,7 +237,7 @@ func TestFreezeDiffLayer(t *testing.T) {
 		// Wait for the buffer to be zero.
 	}
 	// Minus one empty block.
-	if fullBackend.chain.diffQueue.Size() > blockNum-1 && fullBackend.chain.diffQueue.Size() < blockNum-2 {
+	if fullBackend.chain.diffQueue.Size() != blockNum-1 {
 		t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum-1, fullBackend.chain.diffQueue.Size())
 	}
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index 8a05a6cfb1..44fbe2e20e 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -351,12 +351,6 @@ func (bc *BlockChain) HasState(hash common.Hash) bool {
 	if bc.NoTries() {
 		return bc.snaps != nil && bc.snaps.Snapshot(hash) != nil
 	}
-	if bc.pipeCommit && bc.snaps != nil {
-		// If parent snap is pending on verification, treat it as state exist
-		if s := bc.snaps.Snapshot(hash); s != nil && !s.Verified() {
-			return true
-		}
-	}
 	_, err := bc.stateCache.OpenTrie(hash)
 	return err == nil
 }
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 3917117b91..d3c3a08c42 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -51,8 +51,7 @@ import (
 // So we can deterministically seed different blockchains
 var (
 	canonicalSeed = 1
-	forkSeed1     = 2
-	forkSeed2     = 3
+	forkSeed      = 2
 	TestTriesInMemory = 128
 )
@@ -61,7 +60,7 @@ var (
 // chain. Depending on the full flag, it creates either a full block chain or a
 // header only chain. The database and genesis specification for block generation
 // are also returned in case more test blocks are needed later.
-func newCanonical(engine consensus.Engine, n int, full bool, scheme string, pipeline bool) (ethdb.Database, *Genesis, *BlockChain, error) {
+func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (ethdb.Database, *Genesis, *BlockChain, error) {
 	var (
 		genesis = &Genesis{
 			BaseFee: big.NewInt(params.InitialBaseFee),
 		}
 	)
@@ -69,12 +68,7 @@
-	// Initialize a fresh chain with only a genesis block
-	var ops []BlockChainOption
-	if pipeline {
-		ops = append(ops, EnablePipelineCommit)
-	}
-	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil, ops...)
+	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil, nil)
 	// Create and inject the requested chain
 	if n == 0 {
 		return rawdb.NewMemoryDatabase(), genesis, blockchain, nil
@@ -96,53 +90,9 @@ func newGwei(n int64) *big.Int {
 }

 // Test fork of length N starting from block i
-func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n int, pipeline bool) {
-	// Copy old chain up to #i into a new db
-	db, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline)
-	if err != nil {
-		t.Fatal("could not make new canonical in testFork", err)
-	}
-	defer blockchain2.Stop()
-
-	// Assert the chains have the same header/block at #i
-	hash1 := blockchain.GetBlockByNumber(uint64(i)).Hash()
-	hash2 := blockchain2.GetBlockByNumber(uint64(i)).Hash()
-	if hash1 != hash2 {
-		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
-	}
-	// Extend the newly created chain
-	blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed1)
-	for idx, block := range blockChainB {
-		block.SetRoot(common.Hash{0: byte(forkSeed1), 19: byte(idx)})
-	}
-	previousBlock := blockchain.CurrentBlock()
-	// Sanity check that the forked chain can be imported into the original
-	if _, err := blockchain.InsertChain(blockChainB); err == nil {
-		t.Fatalf("failed to report insert error")
-	}
-
-	time.Sleep(2 * rewindBadBlockInterval)
-	latestBlock := blockchain.CurrentBlock()
-	if latestBlock.Hash() != previousBlock.Hash() || latestBlock.Number.Uint64() != previousBlock.Number.Uint64() {
-		t.Fatalf("rewind do not take effect")
-	}
-	db, _, blockchain3, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline)
-	if err != nil {
-		t.Fatal("could not make new canonical in testFork", err)
-	}
-	defer blockchain3.Stop()
-
-	blockChainC := makeBlockChain(blockchain3.chainConfig, blockchain3.GetBlockByHash(blockchain3.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed2)
-
-	if _, err := blockchain.InsertChain(blockChainC); err != nil {
-		t.Fatalf("failed to insert forking chain: %v", err)
-	}
-}
-
-// Test fork of length N starting from block i
-func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string, pipeline bool) {
+func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string) {
 	// Copy old chain up to #i into a new db
-	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, pipeline)
+	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme)
 	if err != nil {
 		t.Fatal("could not make new canonical in testFork", err)
 	}
@@ -166,12 +116,12 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
 		headerChainB []*types.Header
 	)
 	if full {
-		blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1)
+		blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
 		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}
 	} else {
-		headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1)
+		headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
 		if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}
@@ -182,7 +132,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
 	if full {
 		cur := blockchain.CurrentBlock()
 		tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
-		if err := testBlockChainImport(blockChainB, pipeline, blockchain); err != nil {
+		if err := testBlockChainImport(blockChainB, blockchain); err != nil {
 			t.Fatalf("failed to import forked block chain: %v", err)
 		}
 		last := blockChainB[len(blockChainB)-1]
@@ -202,7 +152,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara

 // testBlockChainImport tries to process a chain of blocks, writing them into
 // the database if successful.
-func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *BlockChain) error {
+func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
 	for _, block := range chain {
 		// Try and process the block
 		err := blockchain.engine.VerifyHeader(blockchain, block.Header())
@@ -220,9 +170,6 @@ func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *B
 			return err
 		}
 		statedb.SetExpectedStateRoot(block.Root())
-		if pipelineCommit {
-			statedb.EnablePipeCommit()
-		}
 		statedb, receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
 		if err != nil {
 			blockchain.reportBlock(block, receipts, err)
@@ -262,26 +209,13 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
 	return nil
 }

-func TestBlockImportVerification(t *testing.T) {
-	length := 5
-
-	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, true, rawdb.HashScheme, true)
-	if err != nil {
-		t.Fatalf("failed to make new canonical chain: %v", err)
-	}
-	defer processor.Stop()
-	// Start fork from current height
-	processor, _ = EnablePipelineCommit(processor)
-	testInvalidStateRootBlockImport(t, processor, length, 10, true)
-}
 func TestLastBlock(t *testing.T) {
 	testLastBlock(t, rawdb.HashScheme)
 	testLastBlock(t, rawdb.PathScheme)
 }

 func testLastBlock(t *testing.T, scheme string) {
-	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false)
+	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -300,7 +234,7 @@ func testLastBlock(t *testing.T, scheme string) {

 // The chain is reorged to whatever specified.
 func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) {
 	// Copy old chain up to #i into a new db
-	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, false)
+	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme)
 	if err != nil {
 		t.Fatal("could not make new canonical in testFork", err)
 	}
@@ -321,7 +255,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b

 	// Extend the newly created chain
 	if full {
-		blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1)
+		blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
 		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}
@@ -332,7 +266,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b
 			t.Fatalf("failed to reorg to the given chain")
 		}
 	} else {
-		headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1)
+		headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
 		if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}
@@ -348,21 +282,20 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b

 // Tests that given a starting canonical chain of a given size, it can be extended
 // with various length chains.
 func TestExtendCanonicalHeaders(t *testing.T) {
-	testExtendCanonical(t, false, rawdb.HashScheme, false)
-	testExtendCanonical(t, false, rawdb.PathScheme, false)
+	testExtendCanonical(t, false, rawdb.HashScheme)
+	testExtendCanonical(t, false, rawdb.PathScheme)
 }

 func TestExtendCanonicalBlocks(t *testing.T) {
-	testExtendCanonical(t, true, rawdb.HashScheme, false)
-	testExtendCanonical(t, true, rawdb.PathScheme, false)
-	testExtendCanonical(t, true, rawdb.HashScheme, true)
+	testExtendCanonical(t, true, rawdb.HashScheme)
+	testExtendCanonical(t, true, rawdb.PathScheme)
 }

-func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool) {
+func testExtendCanonical(t *testing.T, full bool, scheme string) {
 	length := 5

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -375,10 +308,10 @@ func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool)
 		}
 	}
 	// Start fork from current height
-	testFork(t, processor, length, 1, full, better, scheme, pipeline)
-	testFork(t, processor, length, 2, full, better, scheme, pipeline)
-	testFork(t, processor, length, 5, full, better, scheme, pipeline)
-	testFork(t, processor, length, 10, full, better, scheme, pipeline)
+	testFork(t, processor, length, 1, full, better, scheme)
+	testFork(t, processor, length, 2, full, better, scheme)
+	testFork(t, processor, length, 5, full, better, scheme)
+	testFork(t, processor, length, 10, full, better, scheme)
 }

 // Tests that given a starting canonical chain of a given size, it can be extended
@@ -396,7 +329,7 @@ func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) {
 	length := 5

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -409,20 +342,19 @@ func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) {

 // Tests that given a starting canonical chain of a given size, creating shorter
 // forks do not take canonical ownership.
 func TestShorterForkHeaders(t *testing.T) {
-	testShorterFork(t, false, rawdb.HashScheme, false)
-	testShorterFork(t, false, rawdb.PathScheme, false)
+	testShorterFork(t, false, rawdb.HashScheme)
+	testShorterFork(t, false, rawdb.PathScheme)
 }

 func TestShorterForkBlocks(t *testing.T) {
-	testShorterFork(t, true, rawdb.HashScheme, false)
-	testShorterFork(t, true, rawdb.PathScheme, false)
-	testShorterFork(t, true, rawdb.HashScheme, true)
+	testShorterFork(t, true, rawdb.HashScheme)
+	testShorterFork(t, true, rawdb.PathScheme)
 }

-func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) {
+func testShorterFork(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -435,12 +367,12 @@ func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) {
 		}
 	}
 	// Sum of numbers must be less than `length` for this to be a shorter fork
-	testFork(t, processor, 0, 3, full, worse, scheme, pipeline)
-	testFork(t, processor, 0, 7, full, worse, scheme, pipeline)
-	testFork(t, processor, 1, 1, full, worse, scheme, pipeline)
-	testFork(t, processor, 1, 7, full, worse, scheme, pipeline)
-	testFork(t, processor, 5, 3, full, worse, scheme, pipeline)
-	testFork(t, processor, 5, 4, full, worse, scheme, pipeline)
+	testFork(t, processor, 0, 3, full, worse, scheme)
+	testFork(t, processor, 0, 7, full, worse, scheme)
+	testFork(t, processor, 1, 1, full, worse, scheme)
+	testFork(t, processor, 1, 7, full, worse, scheme)
+	testFork(t, processor, 5, 3, full, worse, scheme)
+	testFork(t, processor, 5, 4, full, worse, scheme)
 }

 // Tests that given a starting canonical chain of a given size, creating shorter
@@ -458,7 +390,7 @@ func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -475,20 +407,19 @@ func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) {

 // Tests that given a starting canonical chain of a given size, creating longer
 // forks do take canonical ownership.
 func TestLongerForkHeaders(t *testing.T) {
-	testLongerFork(t, false, rawdb.HashScheme, false)
-	testLongerFork(t, false, rawdb.PathScheme, false)
+	testLongerFork(t, false, rawdb.HashScheme)
+	testLongerFork(t, false, rawdb.PathScheme)
 }

 func TestLongerForkBlocks(t *testing.T) {
-	testLongerFork(t, true, rawdb.HashScheme, false)
-	testLongerFork(t, true, rawdb.PathScheme, false)
-	testLongerFork(t, true, rawdb.HashScheme, true)
+	testLongerFork(t, true, rawdb.HashScheme)
+	testLongerFork(t, true, rawdb.PathScheme)
 }

-func testLongerFork(t *testing.T, full bool, scheme string, pipeline bool) {
+func testLongerFork(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -517,7 +448,7 @@ func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -534,20 +465,19 @@ func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) {

 // Tests that given a starting canonical chain of a given size, creating equal
 // forks do take canonical ownership.
 func TestEqualForkHeaders(t *testing.T) {
-	testEqualFork(t, false, rawdb.HashScheme, false)
-	testEqualFork(t, false, rawdb.PathScheme, false)
+	testEqualFork(t, false, rawdb.HashScheme)
+	testEqualFork(t, false, rawdb.PathScheme)
 }

 func TestEqualForkBlocks(t *testing.T) {
-	testEqualFork(t, true, rawdb.HashScheme, false)
-	testEqualFork(t, true, rawdb.PathScheme, false)
-	testEqualFork(t, true, rawdb.HashScheme, true)
+	testEqualFork(t, true, rawdb.HashScheme)
+	testEqualFork(t, true, rawdb.PathScheme)
 }

-func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) {
+func testEqualFork(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -560,12 +490,12 @@ func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) {
 		}
 	}
 	// Sum of numbers must be equal to `length` for this to be an equal fork
-	testFork(t, processor, 0, 10, full, equal, scheme, pipeline)
-	testFork(t, processor, 1, 9, full, equal, scheme, pipeline)
-	testFork(t, processor, 2, 8, full, equal, scheme, pipeline)
-	testFork(t, processor, 5, 5, full, equal, scheme, pipeline)
-	testFork(t, processor, 6, 4, full, equal, scheme, pipeline)
-	testFork(t, processor, 9, 1, full, equal, scheme, pipeline)
+	testFork(t, processor, 0, 10, full, equal, scheme)
+	testFork(t, processor, 1, 9, full, equal, scheme)
+	testFork(t, processor, 2, 8, full, equal, scheme)
+	testFork(t, processor, 5, 5, full, equal, scheme)
+	testFork(t, processor, 6, 4, full, equal, scheme)
+	testFork(t, processor, 9, 1, full, equal, scheme)
 }

 // Tests that given a starting canonical chain of a given size, creating equal
@@ -583,7 +513,7 @@ func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -599,18 +529,17 @@ func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) {

 // Tests that chains missing links do not get accepted by the processor.
 func TestBrokenHeaderChain(t *testing.T) {
-	testBrokenChain(t, false, rawdb.HashScheme, false)
-	testBrokenChain(t, false, rawdb.PathScheme, false)
+	testBrokenChain(t, false, rawdb.HashScheme)
+	testBrokenChain(t, false, rawdb.PathScheme)
 }

 func TestBrokenBlockChain(t *testing.T) {
-	testBrokenChain(t, true, rawdb.HashScheme, false)
-	testBrokenChain(t, true, rawdb.PathScheme, false)
-	testBrokenChain(t, true, rawdb.HashScheme, true)
+	testBrokenChain(t, true, rawdb.HashScheme)
+	testBrokenChain(t, true, rawdb.PathScheme)
 }

-func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) {
+func testBrokenChain(t *testing.T, full bool, scheme string) {
 	// Make chain starting from genesis
-	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme, pipeline)
+	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -618,12 +547,12 @@ func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) {

 	// Create a forked chain, and try to insert with a missing link
 	if full {
-		chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed1)[1:]
-		if err := testBlockChainImport(chain, pipeline, blockchain); err == nil {
+		chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
+		if err := testBlockChainImport(chain, blockchain); err == nil {
 			t.Errorf("broken block chain not reported")
 		}
 	} else {
-		chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed1)[1:]
+		chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
 		if err := testHeaderChainImport(chain, blockchain); err == nil {
 			t.Errorf("broken header chain not reported")
 		}
@@ -633,32 +562,30 @@ func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) {

 // Tests that reorganising a long difficult chain after a short easy one
 // overwrites the canonical numbers and links in the database.
 func TestReorgLongHeaders(t *testing.T) {
-	testReorgLong(t, false, rawdb.HashScheme, false)
-	testReorgLong(t, false, rawdb.PathScheme, false)
+	testReorgLong(t, false, rawdb.HashScheme)
+	testReorgLong(t, false, rawdb.PathScheme)
 }

 func TestReorgLongBlocks(t *testing.T) {
-	testReorgLong(t, true, rawdb.HashScheme, false)
-	testReorgLong(t, true, rawdb.PathScheme, false)
-	testReorgLong(t, true, rawdb.HashScheme, true)
+	testReorgLong(t, true, rawdb.HashScheme)
+	testReorgLong(t, true, rawdb.PathScheme)
 }

-func testReorgLong(t *testing.T, full bool, scheme string, pipeline bool) {
-	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme, pipeline)
+func testReorgLong(t *testing.T, full bool, scheme string) {
+	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme)
 }

 // Tests that reorganising a short difficult chain after a long easy one
 // overwrites the canonical numbers and links in the database.
 func TestReorgShortHeaders(t *testing.T) {
-	testReorgShort(t, false, rawdb.HashScheme, false)
-	testReorgShort(t, false, rawdb.PathScheme, false)
+	testReorgShort(t, false, rawdb.HashScheme)
+	testReorgShort(t, false, rawdb.PathScheme)
 }

 func TestReorgShortBlocks(t *testing.T) {
-	testReorgShort(t, true, rawdb.HashScheme, false)
-	testReorgShort(t, true, rawdb.PathScheme, false)
-	testReorgShort(t, true, rawdb.HashScheme, true)
+	testReorgShort(t, true, rawdb.HashScheme)
+	testReorgShort(t, true, rawdb.PathScheme)
 }

-func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) {
+func testReorgShort(t *testing.T, full bool, scheme string) {
 	// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
 	// we need a fairly long chain of blocks with different difficulties for a short
 	// one to become heavier than a long one. The 96 is an empirical value.
@@ -670,12 +597,12 @@ func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) {
 	for i := 0; i < len(diff); i++ {
 		diff[i] = -9
 	}
-	testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme, pipeline)
+	testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme)
 }

-func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string, pipeline bool) {
+func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string) {
 	// Create a pristine chain and database
-	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
+	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -744,19 +671,18 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme

 // Tests that the insertion functions detect banned hashes.
 func TestBadHeaderHashes(t *testing.T) {
-	testBadHashes(t, false, rawdb.HashScheme, false)
-	testBadHashes(t, false, rawdb.PathScheme, false)
+	testBadHashes(t, false, rawdb.HashScheme)
+	testBadHashes(t, false, rawdb.PathScheme)
 }

 func TestBadBlockHashes(t *testing.T) {
-	testBadHashes(t, true, rawdb.HashScheme, false)
-	testBadHashes(t, true, rawdb.HashScheme, true)
-	testBadHashes(t, true, rawdb.PathScheme, false)
+	testBadHashes(t, true, rawdb.HashScheme)
+	testBadHashes(t, true, rawdb.PathScheme)
 }

-func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
+func testBadHashes(t *testing.T, full bool, scheme string) {
 	// Create a pristine chain and database
-	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
+	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -786,18 +712,17 @@ func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {

 // Tests that bad hashes are detected on boot, and the chain rolled back to a
 // good state prior to the bad hash.
 func TestReorgBadHeaderHashes(t *testing.T) {
-	testReorgBadHashes(t, false, rawdb.HashScheme, false)
-	testReorgBadHashes(t, false, rawdb.PathScheme, false)
+	testReorgBadHashes(t, false, rawdb.HashScheme)
+	testReorgBadHashes(t, false, rawdb.PathScheme)
 }

 func TestReorgBadBlockHashes(t *testing.T) {
-	testReorgBadHashes(t, true, rawdb.HashScheme, false)
-	testReorgBadHashes(t, true, rawdb.HashScheme, true)
-	testReorgBadHashes(t, true, rawdb.PathScheme, false)
+	testReorgBadHashes(t, true, rawdb.HashScheme)
+	testReorgBadHashes(t, true, rawdb.PathScheme)
 }

-func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
+func testReorgBadHashes(t *testing.T, full bool, scheme string) {
 	// Create a pristine chain and database
-	genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
+	genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -848,19 +773,18 @@ func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {

 // Tests chain insertions in the face of one entity containing an invalid nonce.
 func TestHeadersInsertNonceError(t *testing.T) {
-	testInsertNonceError(t, false, rawdb.HashScheme, false)
-	testInsertNonceError(t, false, rawdb.PathScheme, false)
+	testInsertNonceError(t, false, rawdb.HashScheme)
+	testInsertNonceError(t, false, rawdb.PathScheme)
 }

 func TestBlocksInsertNonceError(t *testing.T) {
-	testInsertNonceError(t, true, rawdb.HashScheme, false)
-	testInsertNonceError(t, true, rawdb.HashScheme, true)
-	testInsertNonceError(t, true, rawdb.PathScheme, false)
+	testInsertNonceError(t, true, rawdb.HashScheme)
+	testInsertNonceError(t, true, rawdb.PathScheme)
 }

-func testInsertNonceError(t *testing.T, full bool, scheme string, pipeline bool) {
+func testInsertNonceError(t *testing.T, full bool, scheme string) {
 	doTest := func(i int) {
 		// Create a pristine chain and database
-		genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
+		genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
 		if err != nil {
 			t.Fatalf("failed to create pristine chain: %v", err)
 		}
@@ -1611,7 +1535,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
 }

 func testCanonicalBlockRetrieval(t *testing.T, scheme string) {
-	_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false)
+	_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
diff --git a/core/error.go b/core/error.go
index 07ff770432..ca0f0b9d00 100644
--- a/core/error.go
+++ b/core/error.go
@@ -39,9 +39,6 @@ var (

 	// ErrCurrentBlockNotFound is returned when current block not found.
 	ErrCurrentBlockNotFound = errors.New("current block not found")
-
-	// ErrKnownBadBlock is return when the block is a known bad block
-	ErrKnownBadBlock = errors.New("already known bad block")
 )

 // List of evm-call-message pre-checking errors. All state transition messages will
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index eb9fa2ed13..c12dd4c3ea 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -119,9 +119,6 @@ type diffLayer struct {
 	storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
 	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)

-	verifiedCh chan struct{} // the difflayer is verified when verifiedCh is nil or closed
-	valid      bool          // mark the difflayer is valid or not.
-
 	diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer

 	lock sync.RWMutex
@@ -145,7 +142,7 @@ func storageBloomHash(h0, h1 common.Hash) uint64 {

 // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
 // level persistent database or a hierarchical diff already.
-func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
+func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
 	// Create the new layer with some pre-allocated data segments
 	dl := &diffLayer{
 		parent:      parent,
@@ -154,7 +151,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
 		accountData: accounts,
 		storageData: storage,
 		storageList: make(map[common.Hash][]common.Hash),
-		verifiedCh:  verified,
 	}

 	switch parent := parent.(type) {
@@ -236,39 +232,6 @@ func (dl *diffLayer) Root() common.Hash {
 	return dl.root
 }

-// WaitAndGetVerifyRes will wait until the diff layer been verified and return the verification result
-func (dl *diffLayer) WaitAndGetVerifyRes() bool {
-	if dl.verifiedCh == nil {
-		return true
-	}
-	<-dl.verifiedCh
-	return dl.valid
-}
-
-func (dl *diffLayer) MarkValid() {
-	dl.valid = true
-}
-
-// Represent whether the difflayer is been verified, does not means it is a valid or invalid difflayer
-func (dl *diffLayer) Verified() bool {
-	if dl.verifiedCh == nil {
-		return true
-	}
-	select {
-	case <-dl.verifiedCh:
-		return true
-	default:
-		return false
-	}
-}
-
-func (dl *diffLayer) CorrectAccounts(accounts map[common.Hash][]byte) {
-	dl.lock.Lock()
-	defer dl.lock.Unlock()
-
-	dl.accountData = accounts
-}
-
 // Parent returns the subsequent layer of a diff layer.
 func (dl *diffLayer) Parent() snapshot {
 	dl.lock.RLock()
@@ -467,8 +430,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([

 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items.
-func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
-	return newDiffLayer(dl, blockRoot, destructs, accounts, storage, verified)
+func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+	return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
 }

 // flatten pushes all data from this point downwards, flattening everything into
diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go
index 1c0844c44d..674a031b16 100644
--- a/core/state/snapshot/difflayer_test.go
+++ b/core/state/snapshot/difflayer_test.go
@@ -80,11 +80,11 @@ func TestMergeBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
-	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)

@@ -152,13 +152,13 @@ func TestMergeDelete(t *testing.T) {
 		}
 	}
 	// Add some flipAccs-flopping layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage, nil)
-	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
+	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)

 	if data, _ := child.Account(h1); data == nil {
 		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
@@ -210,7 +210,7 @@ func TestInsertAndMerge(t *testing.T) {
 			accounts = make(map[common.Hash][]byte)
 			storage  = make(map[common.Hash]map[common.Hash][]byte)
 		)
-		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage, nil)
+		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
 	}
 	{
 		var (
@@ -221,7 +221,7 @@ func TestInsertAndMerge(t *testing.T) {
 		accounts[acc] = randomAccount()
 		storage[acc] = make(map[common.Hash][]byte)
 		storage[acc][slot] = []byte{0x01}
-		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
@@ -257,7 +257,7 @@ func BenchmarkSearch(b *testing.B) {
 		for i := 0; i < 10000; i++ {
 			accounts[randomHash()] = randomAccount()
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	var layer snapshot
 	layer = emptyLayer()
@@ -299,7 +299,7 @@ func BenchmarkSearchSlot(b *testing.B) {
 			accStorage[randomHash()] = value
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	var layer snapshot
 	layer = emptyLayer()
@@ -336,7 +336,7 @@ func BenchmarkFlatten(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -385,7 +385,7 @@ func BenchmarkJournal(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	layer := snapshot(emptyLayer())
 	for i := 1; i < 128; i++ {
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index 58ce3e3657..1556439f23 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -60,19 +60,6 @@ func (dl *diskLayer) Root() common.Hash {
 	return dl.root
 }

-func (dl *diskLayer) WaitAndGetVerifyRes() bool {
-	return true
-}
-
-func (dl *diskLayer) MarkValid() {}
-
-func (dl *diskLayer) Verified() bool {
-	return true
-}
-
-func (dl *diskLayer) CorrectAccounts(map[common.Hash][]byte) {
-}
-
 // Parent always returns nil as there's no layer below the disk.
 func (dl *diskLayer) Parent() snapshot {
 	return nil
@@ -191,6 +178,6 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items. Note, the maps are retained by the method to avoid
 // copying everything.
-func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
-	return newDiffLayer(dl, blockHash, destructs, accounts, storage, verified)
+func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
 }
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index f524253875..168458c405 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -130,7 +130,7 @@ func TestDiskMerge(t *testing.T) {
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
 		conDelNoCache: {conDelNoCacheSlot: nil},
 		conDelCache:   {conDelCacheSlot: nil},
-	}, nil); err != nil {
+	}); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	if err := snaps.Cap(diffRoot, 0); err != nil {
@@ -353,7 +353,7 @@ func TestDiskPartialMerge(t *testing.T) {
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
 		conDelNoCache: {conDelNoCacheSlot: nil},
 		conDelCache:   {conDelCacheSlot: nil},
-	}, nil); err != nil {
+	}); err != nil {
 		t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
 	}
 	if err := snaps.Cap(diffRoot, 0); err != nil {
@@ -464,7 +464,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
 	// Modify or delete some accounts, flatten everything onto disk
 	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
 		accTwo: accTwo[:],
-	}, nil, nil); err != nil {
+	}, nil); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	if err := snaps.Cap(diffRoot, 0); err != nil {
@@ -484,7 +484,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
 		accThree: accThree.Bytes(),
 	}, map[common.Hash]map[common.Hash][]byte{
 		accThree: {accThreeSlot: accThreeSlot.Bytes()},
-	}, nil); err != nil {
+	}); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go
index 3e8a5878ef..54614427a5 100644
--- a/core/state/snapshot/iterator_test.go
+++ b/core/state/snapshot/iterator_test.go
@@ -54,7 +54,7 @@ func TestAccountIteratorBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
+	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
 	it := diffLayer.AccountIterator(common.Hash{})
 	verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator

@@ -92,7 +92,7 @@ func TestStorageIteratorBasics(t *testing.T) {
 		nilStorage[h] = nilstorage
 	}
 	// Add some (identical) layers on top
-	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage), nil)
+	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
 	for account := range accounts {
 		it, _ := diffLayer.StorageIterator(account, common.Hash{})
 		verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@@ -223,13 +223,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)
+		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)
+		randomAccountSet("0xcc", "0xf0", "0xff"), nil)

 	// Verify the single and multi-layer iterators
 	head := snaps.Snapshot(common.HexToHash("0x04"))
@@ -270,13 +270,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))

 	// Verify the single and multi-layer iterators
 	head := snaps.Snapshot(common.HexToHash("0x04"))
@@ -354,14 +354,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil, nil)
-	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil, nil)
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil, nil)
-	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil, nil)
-	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil, nil)
-	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil, nil)
-	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil, nil)
-	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil, nil)
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil)
+	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil)
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil)
+	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil)
+	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil)
+	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil)
+	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil)
+	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)

 	it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
 	head := snaps.Snapshot(common.HexToHash("0x09"))
@@ -453,14 +453,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a), nil)
-	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b), nil)
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c), nil)
-	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d), nil)
-	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil)
-	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil)
-	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g), nil)
-	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h), nil)
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
+	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
+	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
+	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
+	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))

 	it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
 	head := snaps.Snapshot(common.HexToHash("0x09"))
@@ -523,7 +523,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
 		},
 	}
 	for i := 1; i < 128; i++ {
-		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil)
+		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
 	}
 	// Iterate the entire stack and ensure everything is hit only once
 	head := snaps.Snapshot(common.HexToHash("0x80"))
@@ -568,13 +568,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
 	}
 	// Create a stack of diffs on top
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)
+		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)
+		randomAccountSet("0xcc", "0xf0", "0xff"), nil)

 	// Create an iterator and flatten the data from underneath it
 	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
@@ -599,13 +599,13 @@ func TestAccountIteratorSeek(t *testing.T) {
 		},
 	}
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd",
"0xf0"), nil, nil) + randomAccountSet("0xbb", "0xdd", "0xf0"), nil) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil) + randomAccountSet("0xcc", "0xf0", "0xff"), nil) // Account set is now // 02: aa, ee, f0, ff @@ -663,13 +663,13 @@ func TestStorageIteratorSeek(t *testing.T) { } // Stack three diff layers on top with various overlaps snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil)) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil)) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil)) // Account set is now // 02: 01, 03, 05 @@ -726,17 +726,17 @@ func TestAccountIteratorDeletions(t *testing.T) { } // Stack three diff layers on top with various overlaps snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), - nil, randomAccountSet("0x11", "0x22", "0x33"), nil, nil) + nil, randomAccountSet("0x11", "0x22", "0x33"), nil) deleted := common.HexToHash("0x22") destructed := map[common.Hash]struct{}{ deleted: {}, } snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), - destructed, randomAccountSet("0x11", "0x33"), nil, nil) + destructed, randomAccountSet("0x11", "0x33"), nil) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), - nil, randomAccountSet("0x33", "0x44", "0x55"), nil, nil) + nil, randomAccountSet("0x33", "0x44", "0x55"), nil) // The output should be 11,33,44,55 it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) @@ -772,10 +772,10 @@ func TestStorageIteratorDeletions(t *testing.T) { } // Stack three diff layers on top with various overlaps snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil)) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}})) // The output should be 02,04,05,06 it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{}) @@ -791,7 +791,7 @@ func TestStorageIteratorDeletions(t *testing.T) { destructed := map[common.Hash]struct{}{ common.HexToHash("0xaa"): {}, } - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil, nil) + snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil) it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) verifyIterator(t, 0, it, verifyStorage) @@ -799,7 +799,7 @@ 
func TestStorageIteratorDeletions(t *testing.T) { // Re-insert the slots of the same account snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil)) // The output should be 07,08,09 it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{}) @@ -807,7 +807,7 @@ func TestStorageIteratorDeletions(t *testing.T) { it.Release() // Destruct the whole storage but re-create the account in the same layer - snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil), nil) + snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil)) it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{}) verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12 it.Release() @@ -849,7 +849,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) { }, } for i := 1; i <= 100; i++ { - snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil) + snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil) } // We call this once before the benchmark, so the creation of // sorted accountlists are not included in the results. @@ -944,9 +944,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) { base.root: base, }, } - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil, nil) + snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil) for i := 2; i <= 100; i++ { - snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil, nil) + snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil) } // We call this once before the benchmark, so the creation of // sorted accountlists are not included in the results. diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go index cc60f79ce7..40fb51ae1d 100644 --- a/core/state/snapshot/journal.go +++ b/core/state/snapshot/journal.go @@ -110,7 +110,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou // etc.), we just discard all diffs and try to recover them later. var current snapshot = base err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error { - current = newDiffLayer(current, root, destructSet, accountData, storageData, nil) + current = newDiffLayer(current, root, destructSet, accountData, storageData) return nil }) if err != nil { diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 117504f8b6..b55d4d1df7 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -100,18 +100,6 @@ type Snapshot interface { // Root returns the root hash for which this snapshot was made. 
Root() common.Hash - // WaitAndGetVerifyRes will wait until the snapshot been verified and return verification result - WaitAndGetVerifyRes() bool - - // Verified returns whether the snapshot is verified - Verified() bool - - // MarkValid stores the verification result - MarkValid() - - // CorrectAccounts updates account data for storing the correct data during pipecommit - CorrectAccounts(map[common.Hash][]byte) - // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. Account(hash common.Hash) (*types.SlimAccount, error) @@ -142,7 +130,7 @@ type snapshot interface { // the specified data items. // // Note, the maps are retained by the method to avoid copying everything. - Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer + Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer // Journal commits an entire diff hierarchy to disk into a single journal entry. // This is meant to be used during shutdown to persist the snapshot without @@ -367,7 +355,7 @@ func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot { // Update adds a new snapshot into the tree, if that can be linked to an existing // old parent. It is disallowed to insert a disk layer (the origin of all). -func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) error { +func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { // Reject noop updates to avoid self-loops in the snapshot tree. This is a // special case that can only happen for Clique networks where empty blocks // don't modify the state (0 block subsidy). 
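For context, the two interface hunks above plus the `Tree.Update` signature change mean a diff layer no longer carries a verification channel: it is usable as soon as `Update` returns. An illustrative sketch of the new call shape (not part of the patch; the roots and the account payload are hypothetical placeholders):

	package main

	import (
		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/state/snapshot"
		"github.com/ethereum/go-ethereum/log"
	)

	// linkLayer links one diff layer under parentRoot using the five-argument
	// Update; the trailing `verified chan struct{}` argument is gone.
	func linkLayer(snaps *snapshot.Tree, blockRoot, parentRoot common.Hash) {
		accounts := map[common.Hash][]byte{
			common.HexToHash("0xa1"): {0x01}, // placeholder blob, not real slim-RLP account data
		}
		if err := snaps.Update(blockRoot, parentRoot, nil, accounts, nil); err != nil {
			log.Warn("Failed to update snapshot tree", "from", parentRoot, "to", blockRoot, "err", err)
		}
	}

Callers that used to block on `WaitAndGetVerifyRes` before trusting a layer, such as `Tree.Journal` in the hunks that follow, can now proceed unconditionally.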
@@ -382,7 +370,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
 	if parent == nil {
 		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
 	}
-	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage, verified)
+	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
 
 	// Save the new snapshot for later
 	t.lock.Lock()
@@ -708,11 +696,6 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	if snap == nil {
 		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
 	}
-	// Wait the snapshot(difflayer) is verified, it means the account data also been refreshed with the correct data
-	if !snap.WaitAndGetVerifyRes() {
-		return common.Hash{}, ErrSnapshotStale
-	}
-
 	// Run the journaling
 	t.lock.Lock()
 	defer t.lock.Unlock()
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 57a1d9f1b4..f6b5324e7b 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -107,7 +107,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 2 {
@@ -151,10 +151,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 3 {
@@ -203,13 +203,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 4 {
@@ -263,12 +263,12 @@ func TestPostCapBasicDataAccess(t *testing.T) {
 		},
 	}
 	// The lowest difflayer
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil)
-	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil, nil)
+	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
 
-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil)
-	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil, nil)
+	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
 
 	// checkExist verifies if an account exists in a snapshot
 	checkExist := func(layer *diffLayer, key string) error {
@@ -363,7 +363,7 @@ func TestSnaphots(t *testing.T) {
 	)
 	for i := 0; i < 129; i++ {
 		head = makeRoot(uint64(i + 2))
-		snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil, nil)
+		snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil)
 		last = head
 		snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
 	}
@@ -456,9 +456,9 @@ func TestReadStateDuringFlattening(t *testing.T) {
 		},
 	}
 	// 4 layers in total, 3 diff layers and 1 disk layers
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil)
-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil)
+	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
 
 	// Obtain the topmost snapshot handler for state accessing
 	snap := snaps.Snapshot(common.HexToHash("0xa3"))
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 525da23b0f..b1e16014df 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -35,7 +35,6 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/trienode"
 	"github.com/ethereum/go-ethereum/trie/triestate"
@@ -82,7 +81,6 @@ type StateDB struct {
 	stateRoot    common.Hash // The calculation result of IntermediateRoot
 
 	fullProcessed bool
-	pipeCommit    bool
 
 	// These maps hold the state changes (including the corresponding
 	// original value) that occurred in this **block**.
@@ -197,8 +195,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
 	}
 	tr, err := db.OpenTrie(root)
-	// return error when 1. failed to open trie and 2. the snap is nil or the snap is not nil and done verification
-	if err != nil && (sdb.snap == nil || sdb.snap.Verified()) {
+	if err != nil {
 		return nil, err
 	}
 	_, sdb.noTrie = tr.(*trie.EmptyTrie)
@@ -300,20 +297,6 @@ func (s *StateDB) SetExpectedStateRoot(root common.Hash) {
 	s.expectedRoot = root
 }
 
-// Enable the pipeline commit function of statedb
-func (s *StateDB) EnablePipeCommit() {
-	if s.snap != nil && s.snaps.Layers() > 1 {
-		// after big merge, disable pipeCommit for now,
-		// because `s.db.TrieDB().Update` should be called after `s.trie.Commit(true)`
-		s.pipeCommit = false
-	}
-}
-
-// IsPipeCommit checks whether pipecommit is enabled on the statedb or not
-func (s *StateDB) IsPipeCommit() bool {
-	return s.pipeCommit
-}
-
 // Mark that the block is full processed
 func (s *StateDB) MarkFullProcessed() {
 	s.fullProcessed = true
@@ -335,22 +318,6 @@ func (s *StateDB) Error() error {
 	return s.dbErr
 }
 
-// Not thread safe
-func (s *StateDB) Trie() (Trie, error) {
-	if s.trie == nil {
-		err := s.WaitPipeVerification()
-		if err != nil {
-			return nil, err
-		}
-		tr, err := s.db.OpenTrie(s.originalRoot)
-		if err != nil {
-			return nil, err
-		}
-		s.trie = tr
-	}
-	return s.trie, nil
-}
-
 func (s *StateDB) AddLog(log *types.Log) {
 	s.journal.append(addLogChange{txhash: s.thash})
 
@@ -867,8 +834,7 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
 		// expectedRoot: s.expectedRoot,
 		// stateRoot:    s.stateRoot,
 		originalRoot: s.originalRoot,
-		// fullProcessed: s.fullProcessed,
-		// pipeCommit:    s.pipeCommit,
+		// fullProcessed: s.fullProcessed,
 		accounts:       make(map[common.Hash][]byte),
 		storages:       make(map[common.Hash]map[common.Hash][]byte),
 		accountsOrigin: make(map[common.Address][]byte),
@@ -999,17 +965,6 @@ func (s *StateDB) GetRefund() uint64 {
 	return s.refund
 }
 
-// WaitPipeVerification waits until the snapshot been verified
-func (s *StateDB) WaitPipeVerification() error {
-	// Need to wait for the parent trie to commit
-	if s.snap != nil {
-		if valid := s.snap.WaitAndGetVerifyRes(); !valid {
-			return errors.New("verification on parent snap failed")
-		}
-	}
-	return nil
-}
-
 // Finalise finalises the state by removing the destructed objects and clears
 // the journal as well as the refunds. Finalise, however, will not push any updates
 // into the tries just yet. Only IntermediateRoot or Commit will do that.
@@ -1056,11 +1011,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
 	}
 	prefetcher := s.prefetcher
 	if prefetcher != nil && len(addressesToPrefetch) > 0 {
-		if s.snap.Verified() {
-			prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
-		} else if prefetcher.rootParent != (common.Hash{}) {
-			prefetcher.prefetch(common.Hash{}, prefetcher.rootParent, common.Address{}, addressesToPrefetch)
-		}
+		prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
 	}
 	// Invalidate journal because reverting across transactions is not allowed.
 	s.clearJournalAndRefund()
@@ -1076,76 +1027,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	return s.StateIntermediateRoot()
 }
 
-// CorrectAccountsRoot will fix account roots in pipecommit mode
-func (s *StateDB) CorrectAccountsRoot(blockRoot common.Hash) {
-	var snapshot snapshot.Snapshot
-	if blockRoot == (common.Hash{}) {
-		snapshot = s.snap
-	} else if s.snaps != nil {
-		snapshot = s.snaps.Snapshot(blockRoot)
-	}
-
-	if snapshot == nil {
-		return
-	}
-	if accounts, err := snapshot.Accounts(); err == nil && accounts != nil {
-		for _, obj := range s.stateObjects {
-			if !obj.deleted {
-				if account, exist := accounts[crypto.Keccak256Hash(obj.address[:])]; exist {
-					if len(account.Root) == 0 {
-						obj.data.Root = types.EmptyRootHash
-					} else {
-						obj.data.Root = common.BytesToHash(account.Root)
-					}
-				}
-			}
-		}
-	}
-}
-
-// PopulateSnapAccountAndStorage tries to populate required accounts and storages for pipecommit
-func (s *StateDB) PopulateSnapAccountAndStorage() {
-	for addr := range s.stateObjectsPending {
-		if obj := s.stateObjects[addr]; !obj.deleted {
-			if s.snap != nil {
-				s.populateSnapStorage(obj)
-				s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data)
-			}
-		}
-	}
-}
-
-// populateSnapStorage tries to populate required storages for pipecommit, and returns a flag to indicate whether the storage root changed or not
-func (s *StateDB) populateSnapStorage(obj *stateObject) bool {
-	for key, value := range obj.dirtyStorage {
-		obj.pendingStorage[key] = value
-	}
-	if len(obj.pendingStorage) == 0 {
-		return false
-	}
-	hasher := crypto.NewKeccakState()
-	var storage map[common.Hash][]byte
-	for key, value := range obj.pendingStorage {
-		var v []byte
-		if (value != common.Hash{}) {
-			// Encoding []byte cannot fail, ok to ignore the error.
-			v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
-		}
-		// If state snapshotting is active, cache the data til commit
-		if obj.db.snap != nil {
-			if storage == nil {
-				// Retrieve the old storage map, if available, create a new one otherwise
-				if storage = obj.db.storages[obj.addrHash]; storage == nil {
-					storage = make(map[common.Hash][]byte)
-					obj.db.storages[obj.addrHash] = storage
-				}
-			}
-			storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00
-		}
-	}
-	return true
-}
-
 func (s *StateDB) AccountsIntermediateRoot() {
 	tasks := make(chan func())
 	finishCh := make(chan struct{})
@@ -1482,7 +1363,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
 //
 // The associated block number of the state transition is also provided
 // for more chain context.
-func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFuncs ...func() error) (common.Hash, *types.DiffLayer, error) {
+func (s *StateDB) Commit(block uint64, postCommitFuncs ...func() error) (common.Hash, *types.DiffLayer, error) {
 	// Short circuit in case any database failure occurred earlier.
 	if s.dbErr != nil {
 		s.StopPrefetcher()
@@ -1490,38 +1371,17 @@
 	}
 	// Finalize any pending changes and merge everything into the tries
 	var (
-		diffLayer   *types.DiffLayer
-		verified    chan struct{}
-		snapUpdated chan struct{}
-		incomplete  map[common.Address]struct{}
-		nodes       = trienode.NewMergedNodeSet()
+		diffLayer  *types.DiffLayer
+		incomplete map[common.Address]struct{}
+		nodes      = trienode.NewMergedNodeSet()
 	)
 	if s.snap != nil {
 		diffLayer = &types.DiffLayer{}
 	}
-	if s.pipeCommit {
-		// async commit the MPT
-		verified = make(chan struct{})
-		snapUpdated = make(chan struct{})
-	}
 	commmitTrie := func() error {
 		commitErr := func() error {
-			if s.pipeCommit {
-				<-snapUpdated
-				// Due to state verification pipeline, the accounts roots are not updated, leading to the data in the difflayer is not correct, capture the correct data here
-				s.AccountsIntermediateRoot()
-				if parent := s.snap.Root(); parent != s.expectedRoot {
-					accountData := make(map[common.Hash][]byte)
-					for k, v := range s.accounts {
-						accountData[crypto.Keccak256Hash(k[:])] = v
-					}
-					s.snaps.Snapshot(s.expectedRoot).CorrectAccounts(accountData)
-				}
-				s.snap = nil
-			}
-
 			if s.stateRoot = s.StateIntermediateRoot(); s.fullProcessed && s.expectedRoot != s.stateRoot {
 				log.Error("Invalid merkle root", "remote", s.expectedRoot, "local", s.stateRoot)
 				return fmt.Errorf("invalid merkle root (remote: %x local: %x)", s.expectedRoot, s.stateRoot)
@@ -1628,30 +1488,10 @@
 				}
 			}
 		}
-
-		for _, postFunc := range postCommitFuncs {
-			err := postFunc()
-			if err != nil {
-				return err
-			}
-		}
 			wg.Wait()
 			return nil
 		}()
-		if s.pipeCommit {
-			if commitErr == nil {
-				s.snaps.Snapshot(s.stateRoot).MarkValid()
-				close(verified)
-			} else {
-				// The blockchain will do the further rewind if write block not finish yet
-				close(verified)
-				if failPostCommitFunc != nil {
-					failPostCommitFunc()
-				}
-				log.Error("state verification failed", "err", commitErr)
-			}
-		}
 		return commitErr
 	}
@@ -1693,15 +1533,10 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 			if metrics.EnabledExpensive {
 				defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
 			}
-			if s.pipeCommit {
-				defer close(snapUpdated)
-				// State verification pipeline - accounts root are not calculated here, just populate needed fields for process
-				s.PopulateSnapAccountAndStorage()
-			}
 			diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = s.SnapToDiffLayer()
 			// Only update if there's a state transition (skip empty Clique blocks)
 			if parent := s.snap.Root(); parent != s.expectedRoot {
-				err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, verified)
+				err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages)
 				if err != nil {
 					log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err)
@@ -1721,12 +1556,9 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 			return nil
 		},
 	}
-	if s.pipeCommit {
-		go commmitTrie()
-	} else {
-		defer s.StopPrefetcher()
-		commitFuncs = append(commitFuncs, commmitTrie)
-	}
+
+	defer s.StopPrefetcher()
+	commitFuncs = append(commitFuncs, commmitTrie)
 	commitRes := make(chan error, len(commitFuncs))
 	for _, f := range commitFuncs {
 		// commitFuncs[0] and commitFuncs[1] both read map `stateObjects`, but no conflicts
@@ -1743,11 +1575,7 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 	}
 
 	root := s.stateRoot
-	if s.pipeCommit {
-		root = s.expectedRoot
-	} else {
-		s.snap = nil
-	}
+	s.snap = nil
 	if root == (common.Hash{}) {
 		root = types.EmptyRootHash
 	}
diff --git a/eth/backend.go b/eth/backend.go
index 56add41cc0..14053f1450 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -296,9 +296,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		}
 	)
 	bcOps := make([]core.BlockChainOption, 0)
-	if config.PipeCommit {
-		bcOps = append(bcOps, core.EnablePipelineCommit)
-	}
 	if config.PersistDiff {
 		bcOps = append(bcOps, core.EnablePersistDiff(config.DiffBlock))
 	}
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 697a9fb91f..74bea35b97 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -107,7 +107,6 @@ type Config struct {
 	DirectBroadcast     bool
 	DisableSnapProtocol bool // Whether disable snap protocol
 	EnableTrustProtocol bool // Whether enable trust protocol
-	PipeCommit          bool
 	RangeLimit          bool
 
 	// Deprecated, use 'TransactionHistory' instead.
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 2d86b97ab1..ac79812ac7 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -30,7 +30,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
 		DirectBroadcast         bool
 		DisableSnapProtocol     bool
 		EnableTrustProtocol     bool
-		PipeCommit              bool
 		RangeLimit              bool
 		TxLookupLimit           uint64 `toml:",omitempty"`
 		TransactionHistory      uint64 `toml:",omitempty"`
@@ -90,7 +89,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
 	enc.DirectBroadcast = c.DirectBroadcast
 	enc.DisableSnapProtocol = c.DisableSnapProtocol
 	enc.EnableTrustProtocol = c.EnableTrustProtocol
-	enc.PipeCommit = c.PipeCommit
 	enc.RangeLimit = c.RangeLimit
 	enc.TxLookupLimit = c.TxLookupLimit
 	enc.TransactionHistory = c.TransactionHistory
@@ -154,7 +152,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 		DirectBroadcast         *bool
 		DisableSnapProtocol     *bool
 		EnableTrustProtocol     *bool
-		PipeCommit              *bool
 		RangeLimit              *bool
 		TxLookupLimit           *uint64 `toml:",omitempty"`
 		TransactionHistory      *uint64 `toml:",omitempty"`
@@ -243,9 +240,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 	if dec.EnableTrustProtocol != nil {
 		c.EnableTrustProtocol = *dec.EnableTrustProtocol
 	}
-	if dec.PipeCommit != nil {
-		c.PipeCommit = *dec.PipeCommit
-	}
 	if dec.RangeLimit != nil {
 		c.RangeLimit = *dec.RangeLimit
 	}
diff --git a/miner/worker.go b/miner/worker.go
index 8513b6a9b6..135dc45327 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -1430,15 +1430,6 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti
 	if interval != nil {
 		interval()
 	}
-	/*
-
-		err := env.state.WaitPipeVerification()
-		if err != nil {
-			return err
-		}
-		env.state.CorrectAccountsRoot(w.chain.CurrentBlock().Root)
-	*/
-
 	fees := env.state.GetBalance(consensus.SystemAddress).ToBig()
 	feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether))
 	// Withdrawals are set to nil here, because this is only called in PoW.
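For context, the statedb.go hunks above make `Commit` fully synchronous again: the `failPostCommitFunc` rollback hook is gone from the signature, and `commmitTrie` always runs as an ordinary entry in `commitFuncs` instead of being spawned on a goroutine. An illustrative caller-side sketch (not part of the patch; the function and variable names are hypothetical):

	package main

	import (
		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/state"
	)

	// writeState commits the state for one block; the returned root is final
	// as soon as Commit returns, with no pipelined verification to wait for.
	func writeState(statedb *state.StateDB, blockNumber uint64) (common.Hash, error) {
		root, _, err := statedb.Commit(blockNumber) // discarded second result is the *types.DiffLayer
		return root, err
	}

Note that the variadic `postCommitFuncs` parameter survives in the new signature even though the `@@ -1628,30 +1488,10 @@` hunk deletes the loop that invoked the callbacks, so judging by the hunks shown here the remaining argument is left unused.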
diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go
index 7fd9b06c19..a2c8e977d4 100644
--- a/triedb/hashdb/database.go
+++ b/triedb/hashdb/database.go
@@ -347,33 +347,27 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	// Keep committing nodes from the flush-list until we're below allowance
 	oldest := db.oldest
-	err := func() error {
-		for size > limit && oldest != (common.Hash{}) {
-			// Fetch the oldest referenced node and push into the batch
-			node := db.dirties[oldest]
-			rawdb.WriteLegacyTrieNode(batch, oldest, node.node)
-
-			// If we exceeded the ideal batch size, commit and reset
-			if batch.ValueSize() >= ethdb.IdealBatchSize {
-				if err := batch.Write(); err != nil {
-					log.Error("Failed to write flush list to disk", "err", err)
-					return err
-				}
-				batch.Reset()
-			}
-			// Iterate to the next flush item, or abort if the size cap was achieved. Size
-			// is the total size, including the useful cached data (hash -> blob), the
-			// cache item metadata, as well as external children mappings.
-			size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
-			if node.external != nil {
-				size -= common.StorageSize(len(node.external) * common.HashLength)
+	for size > limit && oldest != (common.Hash{}) {
+		// Fetch the oldest referenced node and push into the batch
+		node := db.dirties[oldest]
+		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)
+
+		// If we exceeded the ideal batch size, commit and reset
+		if batch.ValueSize() >= ethdb.IdealBatchSize {
+			if err := batch.Write(); err != nil {
+				log.Error("Failed to write flush list to disk", "err", err)
+				return err
 			}
-			oldest = node.flushNext
+			batch.Reset()
 		}
-		return nil
-	}()
-	if err != nil {
-		return err
+		// Iterate to the next flush item, or abort if the size cap was achieved. Size
+		// is the total size, including the useful cached data (hash -> blob), the
+		// cache item metadata, as well as external children mappings.
+		size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
+		if node.external != nil {
+			size -= common.StorageSize(len(node.external) * common.HashLength)
+		}
+		oldest = node.flushNext
 	}
 	// Flush out any remainder data from the last batch
 	if err := batch.Write(); err != nil {
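For context, this last hunk unwraps a closure that existed only to funnel an error out of the flush loop; since `Cap` itself returns an error, the loop body can return directly. A stand-alone illustration of the pattern being reverted (not part of the patch; `done` and `step` are hypothetical stand-ins for the size check and the batch write):

	package main

	import (
		"errors"
		"fmt"
	)

	func done() bool  { return true }
	func step() error { return errors.New("disk full") }

	// capBefore mirrors the old shape: an immediately-invoked func() error
	// wrapper adds a level of nesting just to propagate the error outward.
	func capBefore() error {
		err := func() error {
			for !done() {
				if err := step(); err != nil {
					return err
				}
			}
			return nil
		}()
		if err != nil {
			return err
		}
		return nil
	}

	// capAfter mirrors the new shape: the loop returns straight to the caller.
	func capAfter() error {
		for !done() {
			if err := step(); err != nil {
				return err
			}
		}
		return nil
	}

	func main() {
		fmt.Println(capBefore(), capAfter()) // both print <nil> here, since done() is immediately true
	}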