From 53d6ed6f35f6811d04adea298fe67b95328fe2ad Mon Sep 17 00:00:00 2001 From: joeylichang Date: Fri, 6 Dec 2024 10:04:37 +0800 Subject: [PATCH 01/13] feat: async verify and commit --- core/blockchain.go | 236 +++++++++++++++++++------------ core/state/snapshot/difflayer.go | 2 + core/state/snapshot/snapshot.go | 21 ++- core/state/statedb.go | 25 +++- 4 files changed, 187 insertions(+), 97 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 1d3ffb12e7..88a88ee638 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -227,6 +227,13 @@ type txLookup struct { transaction *types.Transaction } +type VerifyTask struct { + block *types.Block + state *state.StateDB + receipts types.Receipts + usedGas uint64 +} + // BlockChain represents the canonical chain given a database with a genesis // block. The Blockchain manages chain imports, reverts, chain reorganisations. // @@ -313,6 +320,8 @@ type BlockChain struct { // monitor doubleSignMonitor *monitor.DoubleSignMonitor + + verifyTaskCh chan *VerifyTask } // NewBlockChain returns a fully initialised block chain using information @@ -377,6 +386,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis vmConfig: vmConfig, diffQueue: prque.New[int64, *types.DiffLayer](nil), diffQueueBuffer: make(chan *types.DiffLayer), + verifyTaskCh: make(chan *VerifyTask, 1024), } bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit)) bc.forker = NewForkChoice(bc, shouldPreserve) @@ -1779,92 +1789,116 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. wg.Done() }() - tryCommitTrieDB := func() error { - bc.commitLock.Lock() - defer bc.commitLock.Unlock() - - // If node is running in path mode, skip explicit gc operation - // which is unnecessary in this mode. - if bc.triedb.Scheme() == rawdb.PathScheme { - return nil - } - - triedb := bc.stateCache.TrieDB() - // If we're running an archive node, always flush - if bc.cacheConfig.TrieDirtyDisabled { - return triedb.Commit(block.Root(), false) - } - // Full but not archive node, do proper garbage collection - triedb.Reference(block.Root(), common.Hash{}) // metadata reference to keep trie alive - bc.triegc.Push(block.Root(), -int64(block.NumberU64())) + //tryCommitTrieDB := func() error { + // bc.commitLock.Lock() + // defer bc.commitLock.Unlock() + // + // // If node is running in path mode, skip explicit gc operation + // // which is unnecessary in this mode. + // if bc.triedb.Scheme() == rawdb.PathScheme { + // return nil + // } + // + // triedb := bc.stateCache.TrieDB() + // // If we're running an archive node, always flush + // if bc.cacheConfig.TrieDirtyDisabled { + // return triedb.Commit(block.Root(), false) + // } + // // Full but not archive node, do proper garbage collection + // triedb.Reference(block.Root(), common.Hash{}) // metadata reference to keep trie alive + // bc.triegc.Push(block.Root(), -int64(block.NumberU64())) + // + // // Flush limits are not considered for the first TriesInMemory blocks. 
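// Editor's note: the VerifyTask/verifyTaskCh plumbing introduced above is a plain
// producer/consumer pipeline: insertChain executes a block, enqueues a task, and a
// single background goroutine later validates and commits it. The following is a
// minimal, self-contained sketch of that pattern only; the names and channel size
// are illustrative assumptions, not the patch's exact API.
package main

import (
	"fmt"
	"sync"
)

type verifyTask struct {
	number uint64 // stand-in for block, state, receipts, usedGas
}

func main() {
	taskCh := make(chan *verifyTask, 1024) // buffered so block execution rarely blocks
	quit := make(chan struct{})
	var pending sync.WaitGroup

	// Consumer: mirrors BlockChain.VerifyLoop - validate, then commit, in order.
	go func() {
		for {
			select {
			case <-quit:
				return
			case t := <-taskCh:
				// In the patch a validation or commit failure here is fatal (log.Crit).
				fmt.Println("verified and committed block", t.number)
				pending.Done()
			}
		}
	}()

	// Producer: mirrors insertChain - execution finishes, verification is deferred.
	for n := uint64(1); n <= 3; n++ {
		pending.Add(1)
		taskCh <- &verifyTask{number: n}
	}
	pending.Wait() // a real shutdown would drain the queue like this before closing
	close(quit)
}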
+ // current := block.NumberU64() + // if current <= bc.TriesInMemory() { + // return nil + // } + // // If we exceeded our memory allowance, flush matured singleton nodes to disk + // var ( + // _, nodes, _, imgs = triedb.Size() + // limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 + // ) + // if nodes > limit || imgs > 4*1024*1024 { + // triedb.Cap(limit - ethdb.IdealBatchSize) + // } + // // Find the next state trie we need to commit + // chosen := current - bc.triesInMemory + // flushInterval := time.Duration(bc.flushInterval.Load()) + // // If we exceeded out time allowance, flush an entire trie to disk + // if bc.gcproc > flushInterval { + // canWrite := true + // if posa, ok := bc.engine.(consensus.PoSA); ok { + // if !posa.EnoughDistance(bc, block.Header()) { + // canWrite = false + // } + // } + // if canWrite { + // // If the header is missing (canonical chain behind), we're reorging a low + // // diff sidechain. Suspend committing until this operation is completed. + // header := bc.GetHeaderByNumber(chosen) + // if header == nil { + // log.Warn("Reorg in progress, trie commit postponed", "number", chosen) + // } else { + // // If we're exceeding limits but haven't reached a large enough memory gap, + // // warn the user that the system is becoming unstable. + // if chosen < bc.lastWrite+bc.triesInMemory && bc.gcproc >= 2*flushInterval { + // log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/float64(bc.triesInMemory)) + // } + // // Flush an entire trie and restart the counters + // triedb.Commit(header.Root, true) + // rawdb.WriteSafePointBlockNumber(bc.db, chosen) + // bc.lastWrite = chosen + // bc.gcproc = 0 + // } + // } + // } + // // Garbage collect anything below our required write retention + // wg2 := sync.WaitGroup{} + // for !bc.triegc.Empty() { + // root, number := bc.triegc.Pop() + // if uint64(-number) > chosen { + // bc.triegc.Push(root, number) + // break + // } + // wg2.Add(1) + // go func() { + // triedb.Dereference(root) + // wg2.Done() + // }() + // } + // wg2.Wait() + // return nil + //} + //// Commit all cached state changes into underlying memory database. + //_, diffLayer, err := state.Commit(block.NumberU64(), tryCommitTrieDB) + //if err != nil { + // return err + //} + // + //// Ensure no empty block body + //if diffLayer != nil && block.Header().TxHash != types.EmptyRootHash { + // // Filling necessary field + // diffLayer.Receipts = receipts + // diffLayer.BlockHash = block.Hash() + // diffLayer.Number = block.NumberU64() + // + // diffLayerCh := make(chan struct{}) + // if bc.diffLayerChanCache.Len() >= diffLayerCacheLimit { + // bc.diffLayerChanCache.RemoveOldest() + // } + // bc.diffLayerChanCache.Add(diffLayer.BlockHash, diffLayerCh) + // + // go bc.cacheDiffLayer(diffLayer, diffLayerCh) + //} + wg.Wait() + return nil +} - // Flush limits are not considered for the first TriesInMemory blocks. 
- current := block.NumberU64() - if current <= bc.TriesInMemory() { - return nil - } - // If we exceeded our memory allowance, flush matured singleton nodes to disk - var ( - _, nodes, _, imgs = triedb.Size() - limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 - ) - if nodes > limit || imgs > 4*1024*1024 { - triedb.Cap(limit - ethdb.IdealBatchSize) - } - // Find the next state trie we need to commit - chosen := current - bc.triesInMemory - flushInterval := time.Duration(bc.flushInterval.Load()) - // If we exceeded out time allowance, flush an entire trie to disk - if bc.gcproc > flushInterval { - canWrite := true - if posa, ok := bc.engine.(consensus.PoSA); ok { - if !posa.EnoughDistance(bc, block.Header()) { - canWrite = false - } - } - if canWrite { - // If the header is missing (canonical chain behind), we're reorging a low - // diff sidechain. Suspend committing until this operation is completed. - header := bc.GetHeaderByNumber(chosen) - if header == nil { - log.Warn("Reorg in progress, trie commit postponed", "number", chosen) - } else { - // If we're exceeding limits but haven't reached a large enough memory gap, - // warn the user that the system is becoming unstable. - if chosen < bc.lastWrite+bc.triesInMemory && bc.gcproc >= 2*flushInterval { - log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/float64(bc.triesInMemory)) - } - // Flush an entire trie and restart the counters - triedb.Commit(header.Root, true) - rawdb.WriteSafePointBlockNumber(bc.db, chosen) - bc.lastWrite = chosen - bc.gcproc = 0 - } - } - } - // Garbage collect anything below our required write retention - wg2 := sync.WaitGroup{} - for !bc.triegc.Empty() { - root, number := bc.triegc.Pop() - if uint64(-number) > chosen { - bc.triegc.Push(root, number) - break - } - wg2.Add(1) - go func() { - triedb.Dereference(root) - wg2.Done() - }() - } - wg2.Wait() - return nil - } - // Commit all cached state changes into underlying memory database. - _, diffLayer, err := state.Commit(block.NumberU64(), tryCommitTrieDB) +func (bc *BlockChain) commitState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error { + _, diffLayer, err := state.Commit(block.NumberU64(), nil) if err != nil { return err } - // Ensure no empty block body if diffLayer != nil && block.Header().TxHash != types.EmptyRootHash { // Filling necessary field @@ -1880,7 +1914,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
go bc.cacheDiffLayer(diffLayer, diffLayerCh) } - wg.Wait() return nil } @@ -2246,15 +2279,24 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) return it.index, err } ptime := time.Since(pstart) + statedb.CommitUnVerifiedSnapDifflayer(bc.chainConfig.IsEIP158(block.Number())) - // Validate the state using the default validator vstart := time.Now() - if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { - log.Error("validate state failed", "error", err) - bc.reportBlock(block, receipts, err) - statedb.StopPrefetcher() - return it.index, err + task := &VerifyTask{ + block: block, + state: statedb, + receipts: receipts, + usedGas: usedGas, } + bc.verifyTaskCh <- task + // Validate the state using the default validator + //vstart := time.Now() + //if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { + // log.Error("validate state failed", "error", err) + // bc.reportBlock(block, receipts, err) + // statedb.StopPrefetcher() + // return it.index, err + //} vtime := time.Since(vstart) proctime := time.Since(start) // processing + validation @@ -2376,6 +2418,22 @@ func (bc *BlockChain) updateHighestVerifiedHeader(header *types.Header) { } } +func (bc *BlockChain) VerifyLoop() { + for { + select { + case <-bc.quit: + return + case task := <-bc.verifyTaskCh: + if err := bc.validator.ValidateState(task.block, task.state, task.receipts, task.usedGas); err != nil { + log.Crit("validate state failed", "error", err) + } + if err := bc.commitState(task.block, task.receipts, task.state); err != nil { + log.Crit("commit state failed", "error", err) + } + } + } +} + func (bc *BlockChain) GetHighestVerifiedHeader() *types.Header { return bc.highestVerifiedHeader.Load() } diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index c12dd4c3ea..9deaade303 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -122,6 +122,8 @@ type diffLayer struct { diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer lock sync.RWMutex + + verified atomic.Bool } // destructBloomHash is used to convert a destruct event into a 64 bit mini hash. diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index b55d4d1df7..101318a7b3 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -355,7 +355,7 @@ func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot { // Update adds a new snapshot into the tree, if that can be linked to an existing // old parent. It is disallowed to insert a disk layer (the origin of all). -func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { +func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified bool) error { // Reject noop updates to avoid self-loops in the snapshot tree. This is a // special case that can only happen for Clique networks where empty blocks // don't modify the state (0 block subsidy). 
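// Editor's note: the diffLayer.verified flag and the extra "verified" argument on
// Tree.Update above gate snapshot maintenance on asynchronous block verification.
// Below is an illustrative, self-contained sketch of that idea (the types and
// method names are assumptions for the example, not the snapshot package's API):
// a layer starts unverified, a later step flips the atomic bit, and flattening to
// disk is refused while the bit is unset.
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

type layer struct {
	root     string
	parent   *layer
	verified atomic.Bool
}

// update links a new in-memory layer on top of parent, recording whether its
// content has already been validated against the block's state root.
func update(parent *layer, root string, verified bool) *layer {
	l := &layer{root: root, parent: parent}
	l.verified.Store(verified)
	return l
}

// persist flattens the bottom-most in-memory layer into the disk layer, but only
// once verification has marked it as safe to write.
func persist(bottom *layer) error {
	if !bottom.verified.Load() {
		return errors.New("refusing to persist unverified layer")
	}
	fmt.Println("persisted layer", bottom.root)
	return nil
}

func main() {
	disk := &layer{root: "disk"}
	l1 := update(disk, "block-1", false) // inserted right after execution

	if err := persist(l1); err != nil {
		fmt.Println("skipped:", err) // not verified yet
	}
	l1.verified.Store(true) // the verify loop caught up
	if err := persist(l1); err != nil {
		fmt.Println("skipped:", err)
	}
}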
@@ -372,6 +372,10 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m } snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage) + if verified { + snap.verified.Store(verified) + } + // Save the new snapshot for later t.lock.Lock() defer t.lock.Unlock() @@ -480,6 +484,18 @@ func (t *Tree) Cap(root common.Hash, layers int) error { // survival is only known *after* capping, we need to omit it from the count if // we want to ensure that *at least* the requested number of diff layers remain. func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer { + for { + if parent, ok := diff.parent.(*diffLayer); ok { + if !parent.verified.Load() { + diff = parent + } else { + break + } + } else { + return nil + } + } + // Dive until we run out of layers or reach the persistent database for i := 0; i < layers-1; i++ { // If we still have diff layers below, continue down @@ -527,6 +543,9 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer { } // If the bottom-most layer is larger than our memory cap, persist to disk bottom := diff.parent.(*diffLayer) + if bottom.verified.Load() == false { + return nil + } bottom.lock.RLock() base := diffToDisk(bottom) diff --git a/core/state/statedb.go b/core/state/statedb.go index b1eeb2e8f1..f1007765d3 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -160,12 +160,12 @@ type StateDB struct { // NewWithSharedPool creates a new state with sharedStorge on layer 1.5 func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { - statedb, err := New(root, db, snaps) - if err != nil { - return nil, err - } - statedb.storagePool = NewStoragePool() - return statedb, nil + //statedb, err := New(root, db, snaps) + //if err != nil { + // return nil, err + //} + //statedb.storagePool = NewStoragePool() + return New(root, db, snaps) } // New creates a new state from a given trie. 
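// Editor's note: the statedb changes further down implement a two-phase snapshot
// update: CommitUnVerifiedSnapDifflayer publishes the executed block's diff to the
// snapshot tree immediately (verified=false) so the next block can build on it,
// and the regular Commit later updates the tree with verified=true once
// ValidateState has checked the root. A minimal sketch of that flow; the tree type
// and method names here are illustrative assumptions, not the snapshot package's API.
package main

import "fmt"

type diff struct {
	accounts map[string][]byte
	verified bool
}

type tree struct {
	layers map[string]*diff // state root -> in-memory diff layer
}

func (t *tree) update(root string, accounts map[string][]byte, verified bool) {
	t.layers[root] = &diff{accounts: accounts, verified: verified}
}

func main() {
	t := &tree{layers: make(map[string]*diff)}
	changes := map[string][]byte{"alice": {0x01}}

	// Phase 1: right after EVM execution, before the async validator has run.
	t.update("0xabc", changes, false)
	fmt.Printf("after execution: verified=%v\n", t.layers["0xabc"].verified)

	// Phase 2: the verify loop validated the root and commits for real.
	t.update("0xabc", changes, true)
	fmt.Printf("after verified commit: verified=%v\n", t.layers["0xabc"].verified)
}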
@@ -1543,7 +1543,7 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = s.SnapToDiffLayer() // Only update if there's a state transition (skip empty Clique blocks) if parent := s.snap.Root(); parent != s.expectedRoot { - err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages) + err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, true) if err != nil { log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err) @@ -1596,6 +1596,17 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash return root, diffLayer, nil } +func (s *StateDB) CommitUnVerifiedSnapDifflayer(deleteEmptyObjects bool) { + s.Finalise(deleteEmptyObjects) + if parent := s.snap.Root(); parent != s.expectedRoot { + err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, false) + + if err != nil { + log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err) + } + } +} + func (s *StateDB) SnapToDiffLayer() ([]common.Address, []types.DiffAccount, []types.DiffStorage) { destructs := make([]common.Address, 0, len(s.stateObjectsDestruct)) for account := range s.stateObjectsDestruct { From 91a1b4c93eac9a0d7ae917b3d745a4b4b3a4aa2b Mon Sep 17 00:00:00 2001 From: joeylichang Date: Fri, 6 Dec 2024 10:33:44 +0800 Subject: [PATCH 02/13] chore: forbidden txpool and blobpool --- eth/backend.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 556af40b4a..fe6760f5dc 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -37,8 +37,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state/pruner" "github.com/ethereum/go-ethereum/core/txpool" - "github.com/ethereum/go-ethereum/core/txpool/blobpool" - "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vote" @@ -318,14 +316,15 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.BlobPool.Datadir != "" { config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir) } - blobPool := blobpool.New(config.BlobPool, eth.blockchain) + //blobPool := blobpool.New(config.BlobPool, eth.blockchain) if config.TxPool.Journal != "" { config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal) } - legacyPool := legacypool.New(config.TxPool, eth.blockchain) + //legacyPool := legacypool.New(config.TxPool, eth.blockchain) - eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) + //eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) + eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{}) if err != nil { return nil, err } From 8f5ac08fc3cddeac1e6428b9c8771737d235c0a0 Mon Sep 17 00:00:00 2001 From: joeylichang Date: Fri, 6 Dec 2024 10:57:16 +0800 Subject: [PATCH 03/13] fix: active verify loop --- core/blockchain.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/core/blockchain.go b/core/blockchain.go index 88a88ee638..5f5df8d829 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -581,6 +581,7 @@ func 
NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis if txLookupLimit != nil { bc.txIndexer = newTxIndexer(*txLookupLimit, bc) } + go bc.VerifyLoop() return bc, nil } @@ -2316,6 +2317,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) blockExecutionTimer.Update(ptime - trieRead) // The time spent on EVM processing blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation + err = bc.writeBlockWithState(block, receipts, statedb) + if err != nil { + log.Crit("Failed to write block", "number", block.Number(), "hash", block.Hash(), "err", err) + } + //} + //return it.index, err + // Write the block to the chain and get the status. var ( wstart = time.Now() @@ -2430,6 +2438,7 @@ func (bc *BlockChain) VerifyLoop() { if err := bc.commitState(task.block, task.receipts, task.state); err != nil { log.Crit("commit state failed", "error", err) } + //status, err = bc.writeBlockAndSetHead(task.block, task.receipts, task.logs, task.state, false) } } } From 7f8183c5bfc570aa98bc0e0b665387d42b24c0db Mon Sep 17 00:00:00 2001 From: joeylichang Date: Fri, 6 Dec 2024 11:22:22 +0800 Subject: [PATCH 04/13] fix: write head block --- core/blockchain.go | 120 +++++++++++++++++++++++---------------------- 1 file changed, 62 insertions(+), 58 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 5f5df8d829..6164d3385a 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -232,6 +232,7 @@ type VerifyTask struct { state *state.StateDB receipts types.Receipts usedGas uint64 + logs []*types.Log } // BlockChain represents the canonical chain given a database with a genesis @@ -2071,6 +2072,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) ) // Fire a single chain head event if we've progressed the chain defer func() { + lastCanon = nil if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon}) if posa, ok := bc.Engine().(consensus.PoSA); ok { @@ -2288,6 +2290,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) state: statedb, receipts: receipts, usedGas: usedGas, + logs: logs, } bc.verifyTaskCh <- task // Validate the state using the default validator @@ -2321,23 +2324,21 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) if err != nil { log.Crit("Failed to write block", "number", block.Number(), "hash", block.Hash(), "err", err) } - //} - //return it.index, err // Write the block to the chain and get the status. 
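// Editor's note: with this change insertChain only persists the block's data
// (writeBlockWithState) and defers writeBlockAndSetHead plus the head event
// notification to the verify loop, so the canonical head only advances after
// asynchronous validation succeeds. A small sketch of that split, with
// illustrative names standing in for the blockchain types:
package main

import "fmt"

type chain struct {
	blocks map[uint64]string // number -> hash, stand-in for the block store
	head   uint64            // canonical head, only moved after verification
}

// storeBlock runs inline with execution: the body and state are durable, but the
// block is not yet canonical.
func (c *chain) storeBlock(number uint64, hash string) {
	c.blocks[number] = hash
}

// setHead runs in the background verify loop once validation has passed, and is
// the point where "new head" listeners would be notified.
func (c *chain) setHead(number uint64) {
	c.head = number
	fmt.Println("chain head event for block", number)
}

func main() {
	c := &chain{blocks: make(map[uint64]string)}
	c.storeBlock(100, "0xaa") // import path
	fmt.Println("head before verification:", c.head)
	c.setHead(100) // verify loop
	fmt.Println("head after verification:", c.head)
}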
- var ( - wstart = time.Now() - status WriteStatus - ) - if !setHead { - // Don't set the head, only insert the block - err = bc.writeBlockWithState(block, receipts, statedb) - } else { - status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false) - } - if err != nil { - return it.index, err - } + //var ( + // wstart = time.Now() + // status WriteStatus + //) + //if !setHead { + // // Don't set the head, only insert the block + // err = bc.writeBlockWithState(block, receipts, statedb) + //} else { + // status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false) + //} + //if err != nil { + // return it.index, err + //} // Update the metrics touched during block commit accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them @@ -2345,7 +2346,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them - blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits) + //blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits) blockInsertTimer.UpdateSince(start) // Report the import stats before returning the various results @@ -2357,7 +2358,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) snapDiffItems, snapBufItems, _ = bc.snaps.Size() } trieDiffNodes, trieBufNodes, trieImmutableBufNodes, _ := bc.triedb.Size() - stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, trieImmutableBufNodes, status == CanonStatTy) + stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, trieImmutableBufNodes, true) if !setHead { // After merge we expect few side chains. Simply count @@ -2366,49 +2367,49 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) return it.index, nil // Direct block insertion of a single block } - switch status { - case CanonStatTy: - log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), - "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), - "elapsed", common.PrettyDuration(time.Since(start)), - "root", block.Root()) - - lastCanon = block - - // Only count canonical blocks for GC processing time - bc.gcproc += proctime - - case SideStatTy: - log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), - "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), - "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), - "root", block.Root()) - - default: - // This in theory is impossible, but lets be nice to our future selves and leave - // a log, instead of trying to track down blocks imports that don't emit logs. 
- log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(), - "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), - "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), - "root", block.Root()) - } - bc.chainBlockFeed.Send(ChainHeadEvent{block}) + //switch status { + //case CanonStatTy: + // log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), + // "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), + // "elapsed", common.PrettyDuration(time.Since(start)), + // "root", block.Root()) + // + // lastCanon = block + // + // // Only count canonical blocks for GC processing time + // bc.gcproc += proctime + // + //case SideStatTy: + // log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), + // "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), + // "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), + // "root", block.Root()) + // + //default: + // // This in theory is impossible, but lets be nice to our future selves and leave + // // a log, instead of trying to track down blocks imports that don't emit logs. + // log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(), + // "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), + // "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), + // "root", block.Root()) + //} + //bc.chainBlockFeed.Send(ChainHeadEvent{block}) } // Any blocks remaining here? The only ones we care about are the future ones - if block != nil && errors.Is(err, consensus.ErrFutureBlock) { - if err := bc.addFutureBlock(block); err != nil { - return it.index, err - } - block, err = it.next() - - for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() { - if err := bc.addFutureBlock(block); err != nil { - return it.index, err - } - stats.queued++ - } - } + //if block != nil && errors.Is(err, consensus.ErrFutureBlock) { + // if err := bc.addFutureBlock(block); err != nil { + // return it.index, err + // } + // block, err = it.next() + // + // for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() { + // if err := bc.addFutureBlock(block); err != nil { + // return it.index, err + // } + // stats.queued++ + // } + //} stats.ignored += it.remaining() return it.index, err @@ -2438,7 +2439,10 @@ func (bc *BlockChain) VerifyLoop() { if err := bc.commitState(task.block, task.receipts, task.state); err != nil { log.Crit("commit state failed", "error", err) } - //status, err = bc.writeBlockAndSetHead(task.block, task.receipts, task.logs, task.state, false) + if _, err := bc.writeBlockAndSetHead(task.block, task.receipts, task.logs, task.state, false); err != nil { + log.Crit("write block and set head failed", "error", err) + } + bc.chainBlockFeed.Send(ChainHeadEvent{task.block}) } } } From 55566804eda7a90c9994793e731833890f03cdbc Mon Sep 17 00:00:00 2001 From: joeylichang Date: Fri, 6 Dec 2024 14:27:25 +0800 Subject: [PATCH 05/13] fix: delay open trie --- core/block_validator.go | 18 +++++++++--------- core/state/statedb.go | 19 +++++++++++++------ 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/core/block_validator.go b/core/block_validator.go index 6b292ddbe4..c65ff2c4ed 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ 
-128,15 +128,15 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { } return nil }, - func() error { - if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { - if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { - return consensus.ErrUnknownAncestor - } - return consensus.ErrPrunedAncestor - } - return nil - }, + //func() error { + // if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { + // if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { + // return consensus.ErrUnknownAncestor + // } + // return consensus.ErrPrunedAncestor + // } + // return nil + //}, func() error { if v.remoteValidator != nil && !v.remoteValidator.AncestorVerified(block.Header()) { return fmt.Errorf("%w, number: %s, hash: %s", ErrAncestorHasNotBeenVerified, block.Number(), block.Hash()) diff --git a/core/state/statedb.go b/core/state/statedb.go index f1007765d3..171db44893 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -194,12 +194,12 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) sdb.snap = sdb.snaps.Snapshot(root) } - tr, err := db.OpenTrie(root) - if err != nil { - return nil, err - } - _, sdb.noTrie = tr.(*trie.EmptyTrie) - sdb.trie = tr + //tr, err := db.OpenTrie(root) + //if err != nil { + // return nil, err + //} + //_, sdb.noTrie = tr.(*trie.EmptyTrie) + //sdb.trie = tr return sdb, nil } @@ -1023,6 +1023,13 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) + + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + panic("Failed to open state trie") + } + s.trie = tr + s.AccountsIntermediateRoot() return s.StateIntermediateRoot() } From e6042bddb96a1b49dffefe712cd6391bf4dafd50 Mon Sep 17 00:00:00 2001 From: joeylichang Date: Fri, 6 Dec 2024 14:53:29 +0800 Subject: [PATCH 06/13] fix: add statedb data after execute --- core/state/state_object.go | 28 ++++++++++++++++++++++++++++ core/state/statedb.go | 17 ++++++++++++++++- 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index 3f2c8e9786..c7f409eb91 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -616,3 +616,31 @@ func (s *stateObject) Nonce() uint64 { func (s *stateObject) Root() common.Hash { return s.data.Root } + +func (s *stateObject) GetPendingStorages() map[common.Hash][]byte { + var ( + hasher = crypto.NewKeccakState() + ) + if len(s.pendingStorage) > 0 { + dirtyStorage := make(map[common.Hash][]byte) + for key, value := range s.pendingStorage { + // Skip noop changes, persist actual changes + if value == s.originStorage[key] { + continue + } + var v []byte + if value != (common.Hash{}) { + value := value + v = common.TrimLeftZeroes(value[:]) + } + // rlp-encoded value to be used by the snapshot + var encoded []byte + if len(v) != 0 { + encoded, _ = rlp.EncodeToBytes(v) + } + dirtyStorage[crypto.HashData(hasher, key[:])] = encoded + } + return dirtyStorage + } + return nil +} diff --git a/core/state/statedb.go b/core/state/statedb.go index 171db44893..98ecff6db0 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1605,8 +1605,23 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash func (s *StateDB) CommitUnVerifiedSnapDifflayer(deleteEmptyObjects bool) { s.Finalise(deleteEmptyObjects) + destructs := 
make(map[common.Hash]struct{}) + accounts := make(map[common.Hash][]byte) + storages := make(map[common.Hash]map[common.Hash][]byte) + for addr := range s.stateObjectsPending { + if obj := s.stateObjects[addr]; !obj.deleted { + accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) + pendingstorages := obj.GetPendingStorages() + if pendingstorages != nil { + storages[obj.addrHash] = pendingstorages + } + } else { + destructs[obj.addrHash] = struct{}{} + } + } + if parent := s.snap.Root(); parent != s.expectedRoot { - err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, false) + err := s.snaps.Update(s.expectedRoot, parent, destructs, accounts, storages, false) if err != nil { log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err) From d6d959572f859e4df50371ff911258e6981de823 Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Tue, 10 Dec 2024 02:19:44 +0000 Subject: [PATCH 07/13] add compare and debug infor --- core/block_validator.go | 2 +- core/blockchain.go | 28 ++++-- core/genesis.go | 2 + core/state/state_object.go | 19 ++++ core/state/statedb.go | 180 +++++++++++++++++++++++++++++++------ 5 files changed, 195 insertions(+), 36 deletions(-) diff --git a/core/block_validator.go b/core/block_validator.go index c65ff2c4ed..32f9eb8474 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -187,7 +187,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD } validateFuns = append(validateFuns, func() error { if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { - return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) + return fmt.Errorf("invalid merkle root (block: %d block_hash: %x remote: %x local: %x) dberr: %w", block.Number(), header.Hash(), header.Root, root, statedb.Error()) } return nil }) diff --git a/core/blockchain.go b/core/blockchain.go index 6164d3385a..fdc4b7a2a1 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -387,7 +387,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis vmConfig: vmConfig, diffQueue: prque.New[int64, *types.DiffLayer](nil), diffQueueBuffer: make(chan *types.DiffLayer), - verifyTaskCh: make(chan *VerifyTask, 1024), + // verifyTaskCh: make(chan *VerifyTask, 1024), + verifyTaskCh: make(chan *VerifyTask, 1), } bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit)) bc.forker = NewForkChoice(bc, shouldPreserve) @@ -2242,7 +2243,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) } // Retrieve the parent block and it's state to execute on top - start := time.Now() + // start := time.Now() parent := it.previous() if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) @@ -2268,12 +2269,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) // 2.do trie prefetch for MPT trie node cache // it is for the big state trie tree, prefetch based on transaction's From/To address. 
// trie prefetcher is thread safe now, ok to prefetch in a separate routine - go throwaway.TriePrefetchInAdvance(block, signer) + // go throwaway.TriePrefetchInAdvance(block, signer) } // Process block using the parent state as reference point statedb.SetExpectedStateRoot(block.Root()) - pstart := time.Now() + // pstart := time.Now() statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig) close(interruptCh) // state prefetch can be stopped if err != nil { @@ -2281,10 +2282,22 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) statedb.StopPrefetcher() return it.index, err } - ptime := time.Since(pstart) +// ptime := time.Since(pstart) statedb.CommitUnVerifiedSnapDifflayer(bc.chainConfig.IsEIP158(block.Number())) + log.Info("Richard: successfully process", " block=", block.Number()) - vstart := time.Now() + // Add to cache + bc.blockCache.Add(block.Hash(), block) + bc.hc.numberCache.Add(block.Hash(), block.NumberU64()) + bc.hc.headerCache.Add(block.Hash(), block.Header()) + + ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) + // Make sure no inconsistent state is leaked during insertion + externTd := new(big.Int).Add(block.Difficulty(), ptd) + + bc.hc.tdCache.Add(block.Hash(), externTd) + +// vstart := time.Now() task := &VerifyTask{ block: block, state: statedb, @@ -2293,6 +2306,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) logs: logs, } bc.verifyTaskCh <- task +/* // Validate the state using the default validator //vstart := time.Now() //if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { @@ -2394,6 +2408,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) // "root", block.Root()) //} //bc.chainBlockFeed.Send(ChainHeadEvent{block}) +*/ } // Any blocks remaining here? 
The only ones we care about are the future ones @@ -2443,6 +2458,7 @@ func (bc *BlockChain) VerifyLoop() { log.Crit("write block and set head failed", "error", err) } bc.chainBlockFeed.Send(ChainHeadEvent{task.block}) + log.Info("Richard: successfully verify", "block=", task.block.Number()) } } } diff --git a/core/genesis.go b/core/genesis.go index e06938f327..2664c0a4f6 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -141,6 +141,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) { statedb.SetState(addr, key, value) } } + statedb.CommitUnVerifiedSnapDifflayer(false) statedb.IntermediateRoot(false) root, _, err := statedb.Commit(0, nil) return root, err @@ -168,6 +169,7 @@ func flushAlloc(ga *types.GenesisAlloc, db ethdb.Database, triedb *triedb.Databa statedb.SetState(addr, key, value) } } + statedb.CommitUnVerifiedSnapDifflayer(false) statedb.IntermediateRoot(false) root, _, err := statedb.Commit(0, nil) if err != nil { diff --git a/core/state/state_object.go b/core/state/state_object.go index c7f409eb91..2b4302401c 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -411,6 +411,25 @@ func (s *stateObject) updateTrie() (Trie, error) { } } } + + // compare the storge + if r_storage, ok := s.db.r_storages[s.addrHash]; !ok && len(storage)>0 { + panic(fmt.Sprintf("Richard: state object, can't find the changed storages for addr=%x", s.addrHash)) + } else { + if len(storage) != len(r_storage) { + panic(fmt.Sprintf("Richard: state obj changes not the same len, len_r=%d len=%d", len(r_storage), len(storage))) + } + for k,v := range storage { + if r_v, ok := r_storage[k]; !ok { + panic(fmt.Sprintf("Richard: not found k=%x for addr=%x", k, s.addrHash)) + } else { + if !bytes.Equal(v, r_v) { + panic(fmt.Sprintf("Richard: mismatch value(r_v=%x v=%x) for key=%x addr=%x", r_v, v, k, s.addrHash)) + } + } + } + // log.Info("Richard: the same state changes for addr", "addr=", s.addrHash) + } }() wg.Wait() diff --git a/core/state/statedb.go b/core/state/statedb.go index 98ecff6db0..ed8b3cba60 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -24,7 +24,9 @@ import ( "sort" "sync" "time" + "bytes" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/gopool" "github.com/ethereum/go-ethereum/core/rawdb" @@ -91,6 +93,10 @@ type StateDB struct { accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format + r_destructs map[common.Hash]struct{} + r_accounts map[common.Hash][]byte + r_storages map[common.Hash]map[common.Hash][]byte + // This map holds 'live' objects, which will get modified while processing // a state transition. 
stateObjects map[common.Address]*stateObject @@ -200,6 +206,8 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) //} //_, sdb.noTrie = tr.(*trie.EmptyTrie) //sdb.trie = tr + sdb.noTrie = false + return sdb, nil } @@ -1024,11 +1032,13 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) - tr, err := s.db.OpenTrie(s.originalRoot) - if err != nil { - panic("Failed to open state trie") + if s.trie == nil { + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + panic("Richard: Failed to open state trie") + } + s.trie = tr } - s.trie = tr s.AccountsIntermediateRoot() return s.StateIntermediateRoot() @@ -1052,6 +1062,7 @@ func (s *StateDB) AccountsIntermediateRoot() { }() } + var updateAccountNum int // Although naively it makes sense to retrieve the account trie and then do // the contract storage and account updates sequentially, that short circuits // the account prefetcher. Instead, let's process all the storage updates @@ -1059,6 +1070,7 @@ func (s *StateDB) AccountsIntermediateRoot() { // to pull useful data from disk. for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; !obj.deleted { + updateAccountNum++ wg.Add(1) tasks <- func() { obj.updateRoot() @@ -1075,9 +1087,16 @@ func (s *StateDB) AccountsIntermediateRoot() { } } } + if len(s.r_accounts) != updateAccountNum { + log.Info("Richard: not the same number", "r_a_num=", len(s.r_accounts), " updateAccountNum=", updateAccountNum) + } wg.Wait() } +func (s *StateDB) GetModifiedStorages() *map[common.Hash]map[common.Hash][]byte { + return &s.r_storages +} + func (s *StateDB) StateIntermediateRoot() common.Hash { // If there was a trie prefetcher operating, it gets aborted and irrevocably // modified after we start retrieving tries. 
Remove it from the statedb after @@ -1097,27 +1116,62 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { s.trie = trie } } - if s.trie == nil { - tr, err := s.db.OpenTrie(s.originalRoot) - if err != nil { - panic(fmt.Sprintf("failed to open trie tree %s", s.originalRoot)) - } - s.trie = tr - } usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) + accountNum := 0 + destructNum := 0 if !s.noTrie { + if len(s.stateObjectsPending) > 0 && len(s.stateObjectsPending) != len(s.r_accounts) { + panic(fmt.Sprintf("Richard: not the same len, pend_len= %d r_acc_len=%d", len(s.stateObjectsPending), len(s.r_accounts))) + } for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; obj.deleted { + log.Info("Richard: delete", " addr=", addr) s.deleteStateObject(obj) + destructNum = destructNum + 1 + if _, exist := s.r_destructs[crypto.Keccak256Hash(addr[:])]; !exist{ + panic(fmt.Sprintf("failed to find destruct account %x", addr)) + } } else { + // log.Info("Richard: update", " addr=", addr) s.updateStateObject(obj) + accountNum = accountNum + 1 + if r_acc_d, exist := s.accounts[crypto.Keccak256Hash(addr[:])]; !exist{ + panic(fmt.Sprintf("failed to find account %x", addr)) + } else { + r_acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { + panic(err) + } + acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(r_acc_d, acc); err != nil { + panic(err) + } + + if r_acc.Nonce != obj.data.Nonce { + log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", obj.data.Nonce) + } + if !(new(uint256.Int).Sub(r_acc.Balance, obj.data.Balance).IsZero()) { + log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", obj.data.Balance) + } + if !bytes.Equal(r_acc.CodeHash, obj.data.CodeHash) { + if bytes.Equal(obj.data.CodeHash, types.EmptyCodeHash[:]) { + // log.Warn("Richard: empty code hash") + } else { + log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", obj.data.CodeHash) + } + } + } } usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure } if prefetcher != nil { prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } + + if (len(s.r_destructs) != destructNum || len(s.r_accounts) != accountNum) && len(s.stateObjectsPending) != 0 { + log.Info("Richard:", "r_de_len=", len(s.r_destructs), " destructNum=", destructNum, " r_ac_len=", len(s.r_accounts), " accountsN=", accountNum) + } } if len(s.stateObjectsPending) > 0 { @@ -1371,6 +1425,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A // The associated block number of the state transition is also provided // for more chain context. func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash, *types.DiffLayer, error) { + // log.Info("Richard: start to commit state", "block=", block) // Short circuit in case any database failure occurred earlier. 
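// Editor's note: the r_destructs / r_accounts / r_storages fields added to StateDB
// in this patch are debug-only shadow copies of the diff layer that was published
// right after execution; the verified Commit further down rebuilds the same data
// and cross-checks the two, logging or panicking on any mismatch. A small sketch
// of that cross-check, using plain maps and hypothetical helper names:
package main

import (
	"bytes"
	"fmt"
)

// crossCheck reports every key whose recorded (unverified) value differs from the
// value produced at verified commit time.
func crossCheck(recorded, committed map[string][]byte) []string {
	var mismatches []string
	for k, v := range committed {
		if rv, ok := recorded[k]; !ok || !bytes.Equal(rv, v) {
			mismatches = append(mismatches, k)
		}
	}
	for k := range recorded {
		if _, ok := committed[k]; !ok {
			mismatches = append(mismatches, k)
		}
	}
	return mismatches
}

func main() {
	recorded := map[string][]byte{"acct-1": {0x01}, "acct-2": {0x02}}
	committed := map[string][]byte{"acct-1": {0x01}, "acct-2": {0xff}}
	fmt.Println("mismatched keys:", crossCheck(recorded, committed))
}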
if s.dbErr != nil { s.StopPrefetcher() @@ -1486,6 +1541,7 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil { return err } + // log.Info("Richard:", "commit done for block=", block) s.originalRoot = root if metrics.EnabledExpensive { s.TrieDBCommits += time.Since(start) @@ -1542,6 +1598,7 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash return nil }, func() error { + // log.Info("Richard: start to update snapshot", " block=", block) // If snapshotting is enabled, update the snapshot tree with this new version if s.snap != nil { if metrics.EnabledExpensive { @@ -1550,7 +1607,64 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = s.SnapToDiffLayer() // Only update if there's a state transition (skip empty Clique blocks) if parent := s.snap.Root(); parent != s.expectedRoot { + // log.Info("Richard: start to update and verify the diff") err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, true) + // compare + if len(s.r_destructs) != len(s.convertAccountSet(s.stateObjectsDestruct)) || len(s.accounts) != len(s.r_accounts) || len(s.storages) != len(s.r_storages) { + panic(fmt.Sprintf("Richard: no the same len, len_r_d=%d len_d=%d len_r_a=%d len_a=%d len_r_s=%d len_s=%d", len(s.r_destructs), len(s.convertAccountSet(s.stateObjectsDestruct)), len(s.r_accounts), len(s.accounts), len(s.r_storages), len(s.storages) )) + } + for addr := range s.convertAccountSet(s.stateObjectsDestruct) { + if _, ok := s.r_destructs[addr]; !ok { + panic(fmt.Sprintf("Richard: r_destructs has no addr=%x", addr)) + } + } + for addr, acc_d := range s.accounts { + if r_acc_d, ok := s.r_accounts[addr]; !ok { + panic(fmt.Sprintf("Richard: r_accounts has no addr=%x", addr)) + } else { + // if !bytes.Equal(acc, r_acc) { + // panic(fmt.Sprintf("Richard: accounts mismatch for addr=%x", addr)) + // } + r_acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { + panic(err) + } + acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(acc_d, acc); err != nil { + panic(err) + } + + if r_acc.Nonce != acc.Nonce { + log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", acc.Nonce) + } + if !(new(uint256.Int).Sub(r_acc.Balance, acc.Balance).IsZero()) { + log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", acc.Balance) + } + if !bytes.Equal(r_acc.CodeHash, acc.CodeHash) { + if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { + // log.Info("Richard: empty code hash") + } else { + log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", acc.CodeHash) + } + } + } + } + for addr, storage := range s.storages { + if r_storage, ok := s.r_storages[addr]; !ok { + panic(fmt.Sprintf("Richard: r_storages has no addr=%x", addr)) + } else { + for k, v := range storage { + if r_v, ok := r_storage[k]; !ok { + panic(fmt.Sprintf("Richard: no storage for addr=%x k=%x", addr, k)) + } else { + if !bytes.Equal(v, r_v) { + panic(fmt.Sprintf("Richard: mismatch storage for addr=%x k=%x v=%x r_v=%x", addr, k, v, r_v)) + } + } + } + } + } + log.Info("Richard: commit successfully with the same created diff for block", " block=", block) if err != nil { log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", 
err) @@ -1565,6 +1679,8 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash log.Warn("Failed to cap snapshot tree", "root", s.expectedRoot, "layers", s.snaps.CapLimit(), "err", err) } }() + } else { + log.Info("Richard: not to update and verify diff", "parent=", parent) } } return nil @@ -1594,37 +1710,43 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash root = types.EmptyRootHash } // Clear all internal flags at the end of commit operation. + s.r_destructs = make(map[common.Hash]struct{}) + s.r_accounts = make(map[common.Hash][]byte) + s.r_storages = make(map[common.Hash]map[common.Hash][]byte) + s.accounts = make(map[common.Hash][]byte) s.storages = make(map[common.Hash]map[common.Hash][]byte) s.accountsOrigin = make(map[common.Address][]byte) s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) s.stateObjectsDirty = make(map[common.Address]struct{}) s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) + log.Info("Richard: successfully commit state", "block=", block) return root, diffLayer, nil } func (s *StateDB) CommitUnVerifiedSnapDifflayer(deleteEmptyObjects bool) { s.Finalise(deleteEmptyObjects) - destructs := make(map[common.Hash]struct{}) - accounts := make(map[common.Hash][]byte) - storages := make(map[common.Hash]map[common.Hash][]byte) - for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; !obj.deleted { - accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - pendingstorages := obj.GetPendingStorages() - if pendingstorages != nil { - storages[obj.addrHash] = pendingstorages - } - } else { - destructs[obj.addrHash] = struct{}{} - } - } - if parent := s.snap.Root(); parent != s.expectedRoot { - err := s.snaps.Update(s.expectedRoot, parent, destructs, accounts, storages, false) + s.r_destructs = s.convertAccountSet(s.stateObjectsDestruct) + s.r_accounts = make(map[common.Hash][]byte) + s.r_storages = make(map[common.Hash]map[common.Hash][]byte) - if err != nil { - log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err) + for addr := range s.stateObjectsPending { + if obj := s.stateObjects[addr]; !obj.deleted { + s.r_accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) + pendingstorages := obj.GetPendingStorages() + if pendingstorages != nil { + s.r_storages[obj.addrHash] = pendingstorages + } + } + } + + if s.snap != nil { + if parent := s.snap.Root(); parent != s.expectedRoot { + err := s.snaps.Update(s.expectedRoot, parent, s.r_destructs, s.r_accounts, s.r_storages, false) + if err != nil { + log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err) + } } } } From e43c66e4dfc0b2b7f0338773c7b80ea23857d661 Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Tue, 10 Dec 2024 08:42:27 +0000 Subject: [PATCH 08/13] get verified state root for CA before update CA trie --- core/state/statedb.go | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index ed8b3cba60..92115818fd 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1044,6 +1044,29 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { return s.StateIntermediateRoot() } +func (s *StateDB) GetLatestVerifiedStateRoot(addrHash common.Hash) common.Hash { + if s.snaps != nil { + s.snap = s.snaps.Snapshot(s.originalRoot) + acc, err := s.snap.Account(addrHash) + if err == nil { + if acc == nil { + 
return types.EmptyRootHash + } + data := &types.StateAccount{ + Nonce: acc.Nonce, + Balance: acc.Balance, + CodeHash: acc.CodeHash, + Root: common.BytesToHash(acc.Root), + } + if data.Root == (common.Hash{}) { + data.Root = types.EmptyRootHash + } + return data.Root + } + } + return types.EmptyRootHash +} + func (s *StateDB) AccountsIntermediateRoot() { tasks := make(chan func()) finishCh := make(chan struct{}) @@ -1073,6 +1096,7 @@ func (s *StateDB) AccountsIntermediateRoot() { updateAccountNum++ wg.Add(1) tasks <- func() { + obj.data.Root = s.GetLatestVerifiedStateRoot(obj.addrHash) obj.updateRoot() // Cache the data until commit. Note, this update mechanism is not symmetric @@ -1121,14 +1145,16 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { accountNum := 0 destructNum := 0 if !s.noTrie { + log.Info("richard: start to state intermediate root") if len(s.stateObjectsPending) > 0 && len(s.stateObjectsPending) != len(s.r_accounts) { panic(fmt.Sprintf("Richard: not the same len, pend_len= %d r_acc_len=%d", len(s.stateObjectsPending), len(s.r_accounts))) } for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; obj.deleted { - log.Info("Richard: delete", " addr=", addr) + // log.Info("Richard: delete", " addr=", addr) s.deleteStateObject(obj) destructNum = destructNum + 1 + log.Info("richard: delete obj", "addr", addr) if _, exist := s.r_destructs[crypto.Keccak256Hash(addr[:])]; !exist{ panic(fmt.Sprintf("failed to find destruct account %x", addr)) } @@ -1136,6 +1162,7 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { // log.Info("Richard: update", " addr=", addr) s.updateStateObject(obj) accountNum = accountNum + 1 + log.Info("richard: update obj", "addr", addr, "obj=", obj.data) if r_acc_d, exist := s.accounts[crypto.Keccak256Hash(addr[:])]; !exist{ panic(fmt.Sprintf("failed to find account %x", addr)) } else { @@ -1165,6 +1192,7 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { } usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure } + log.Info("richard: finish state intermediate root") if prefetcher != nil { prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } @@ -1184,6 +1212,7 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { if s.noTrie { return s.expectedRoot } else { + log.Info("richard: state root", "state_root=", s.trie.Hash()) return s.trie.Hash() } } @@ -1425,6 +1454,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A // The associated block number of the state transition is also provided // for more chain context. func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash, *types.DiffLayer, error) { + log.Info("richard: commit block", "number=", block) // log.Info("Richard: start to commit state", "block=", block) // Short circuit in case any database failure occurred earlier. 
if s.dbErr != nil { From 34f4f063221434c3aaf7c0b9b81db4ffbcc73651 Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Tue, 10 Dec 2024 08:42:50 +0000 Subject: [PATCH 09/13] get verified state root for CA before update CA trie --- core/state/state_object.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/state/state_object.go b/core/state/state_object.go index 2b4302401c..670b9373a4 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -162,6 +163,7 @@ func (s *stateObject) getTrie() (Trie, error) { if err != nil { return nil, err } + log.Info("richard: open ca trie for addr", "state_root=", s.db.originalRoot, " addr=", s.address, "t_root=", s.data.Root) s.trie = tr // } } @@ -354,21 +356,25 @@ func (s *stateObject) updateTrie() (Trie, error) { wg.Add(1) go func() { defer wg.Done() + log.Info("richard: before update trie", "addr=", s.address, " root=", s.trie.Hash()) for key, value := range dirtyStorage { if len(value) == 0 { if err := tr.DeleteStorage(s.address, key[:]); err != nil { s.db.setError(err) } s.db.StorageDeleted += 1 + log.Info("richard: delete key", "key=", key) } else { if err := tr.UpdateStorage(s.address, key[:], value); err != nil { s.db.setError(err) } s.db.StorageUpdated += 1 + log.Info("richard: update kv", "key=", key, " val=", value) } // Cache the items for preloading usedStorage = append(usedStorage, common.CopyBytes(key[:])) } + log.Info("richard: after update trie", "addr=", s.address, " root=", s.trie.Hash()) }() // If state snapshotting is active, cache the data til commit wg.Add(1) From f95048550a3fcc06d740645a4f49ab2a51d6048f Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Tue, 10 Dec 2024 08:58:44 +0000 Subject: [PATCH 10/13] remove panic check --- core/state/statedb.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 92115818fd..9a9792699f 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1146,9 +1146,9 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { destructNum := 0 if !s.noTrie { log.Info("richard: start to state intermediate root") - if len(s.stateObjectsPending) > 0 && len(s.stateObjectsPending) != len(s.r_accounts) { - panic(fmt.Sprintf("Richard: not the same len, pend_len= %d r_acc_len=%d", len(s.stateObjectsPending), len(s.r_accounts))) - } + // if len(s.stateObjectsPending) > 0 && len(s.stateObjectsPending) != len(s.r_accounts) { + // panic(fmt.Sprintf("Richard: not the same len, pend_len= %d r_acc_len=%d", len(s.stateObjectsPending), len(s.r_accounts))) + // } for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; obj.deleted { // log.Info("Richard: delete", " addr=", addr) From 162decf70aa83e1424d67ddb135d88c707984b6a Mon Sep 17 00:00:00 2001 From: joeylichang Date: Wed, 11 Dec 2024 16:09:09 +0800 Subject: [PATCH 11/13] fix: write code after executing --- core/state/state_object.go | 13 ++- core/state/statedb.go | 169 +++++++++++++++++++------------------ 2 files changed, 95 insertions(+), 87 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index 670b9373a4..ee9ec51ed3 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -19,14 +19,15 @@ package state import ( "bytes" "fmt" + 
"github.com/ethereum/go-ethereum/core/rawdb" "io" "sync" "time" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" @@ -419,13 +420,13 @@ func (s *stateObject) updateTrie() (Trie, error) { } // compare the storge - if r_storage, ok := s.db.r_storages[s.addrHash]; !ok && len(storage)>0 { + if r_storage, ok := s.db.r_storages[s.addrHash]; !ok && len(storage) > 0 { panic(fmt.Sprintf("Richard: state object, can't find the changed storages for addr=%x", s.addrHash)) } else { if len(storage) != len(r_storage) { panic(fmt.Sprintf("Richard: state obj changes not the same len, len_r=%d len=%d", len(r_storage), len(storage))) } - for k,v := range storage { + for k, v := range storage { if r_v, ok := r_storage[k]; !ok { panic(fmt.Sprintf("Richard: not found k=%x for addr=%x", k, s.addrHash)) } else { @@ -669,3 +670,9 @@ func (s *stateObject) GetPendingStorages() map[common.Hash][]byte { } return nil } + +func (s *stateObject) WriteCode() { + if s.code != nil && s.dirtyCode { + rawdb.WriteCode(s.db.db.DiskDB(), common.BytesToHash(s.CodeHash()), s.code) + } +} diff --git a/core/state/statedb.go b/core/state/statedb.go index 9a9792699f..f927d45c2d 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,15 +18,14 @@ package state import ( + "bytes" "errors" "fmt" "runtime" "sort" "sync" "time" - "bytes" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/gopool" "github.com/ethereum/go-ethereum/core/rawdb" @@ -37,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" @@ -94,8 +94,8 @@ type StateDB struct { storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format r_destructs map[common.Hash]struct{} - r_accounts map[common.Hash][]byte - r_storages map[common.Hash]map[common.Hash][]byte + r_accounts map[common.Hash][]byte + r_storages map[common.Hash]map[common.Hash][]byte // This map holds 'live' objects, which will get modified while processing // a state transition. 
@@ -1048,21 +1048,21 @@ func (s *StateDB) GetLatestVerifiedStateRoot(addrHash common.Hash) common.Hash { if s.snaps != nil { s.snap = s.snaps.Snapshot(s.originalRoot) acc, err := s.snap.Account(addrHash) - if err == nil { - if acc == nil { - return types.EmptyRootHash - } + if err == nil { + if acc == nil { + return types.EmptyRootHash + } data := &types.StateAccount{ - Nonce: acc.Nonce, - Balance: acc.Balance, - CodeHash: acc.CodeHash, - Root: common.BytesToHash(acc.Root), - } - if data.Root == (common.Hash{}) { - data.Root = types.EmptyRootHash - } + Nonce: acc.Nonce, + Balance: acc.Balance, + CodeHash: acc.CodeHash, + Root: common.BytesToHash(acc.Root), + } + if data.Root == (common.Hash{}) { + data.Root = types.EmptyRootHash + } return data.Root - } + } } return types.EmptyRootHash } @@ -1155,40 +1155,40 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { s.deleteStateObject(obj) destructNum = destructNum + 1 log.Info("richard: delete obj", "addr", addr) - if _, exist := s.r_destructs[crypto.Keccak256Hash(addr[:])]; !exist{ - panic(fmt.Sprintf("failed to find destruct account %x", addr)) - } + if _, exist := s.r_destructs[crypto.Keccak256Hash(addr[:])]; !exist { + panic(fmt.Sprintf("failed to find destruct account %x", addr)) + } } else { // log.Info("Richard: update", " addr=", addr) s.updateStateObject(obj) accountNum = accountNum + 1 log.Info("richard: update obj", "addr", addr, "obj=", obj.data) - if r_acc_d, exist := s.accounts[crypto.Keccak256Hash(addr[:])]; !exist{ - panic(fmt.Sprintf("failed to find account %x", addr)) - } else { - r_acc := new(types.SlimAccount) - if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { - panic(err) - } - acc := new(types.SlimAccount) - if err := rlp.DecodeBytes(r_acc_d, acc); err != nil { - panic(err) - } - - if r_acc.Nonce != obj.data.Nonce { - log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", obj.data.Nonce) - } - if !(new(uint256.Int).Sub(r_acc.Balance, obj.data.Balance).IsZero()) { - log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", obj.data.Balance) - } - if !bytes.Equal(r_acc.CodeHash, obj.data.CodeHash) { - if bytes.Equal(obj.data.CodeHash, types.EmptyCodeHash[:]) { - // log.Warn("Richard: empty code hash") - } else { - log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", obj.data.CodeHash) - } - } - } + if r_acc_d, exist := s.accounts[crypto.Keccak256Hash(addr[:])]; !exist { + panic(fmt.Sprintf("failed to find account %x", addr)) + } else { + r_acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { + panic(err) + } + acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(r_acc_d, acc); err != nil { + panic(err) + } + + if r_acc.Nonce != obj.data.Nonce { + log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", obj.data.Nonce) + } + if !(new(uint256.Int).Sub(r_acc.Balance, obj.data.Balance).IsZero()) { + log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", obj.data.Balance) + } + if !bytes.Equal(r_acc.CodeHash, obj.data.CodeHash) { + if bytes.Equal(obj.data.CodeHash, types.EmptyCodeHash[:]) { + // log.Warn("Richard: empty code hash") + } else { + log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", obj.data.CodeHash) + } + } + } } usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure } @@ -1196,7 +1196,7 @@ func (s *StateDB) StateIntermediateRoot() common.Hash 
{ if prefetcher != nil { prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } - + if (len(s.r_destructs) != destructNum || len(s.r_accounts) != accountNum) && len(s.stateObjectsPending) != 0 { log.Info("Richard:", "r_de_len=", len(s.r_destructs), " destructNum=", destructNum, " r_ac_len=", len(s.r_accounts), " accountsN=", accountNum) } @@ -1641,7 +1641,7 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, true) // compare if len(s.r_destructs) != len(s.convertAccountSet(s.stateObjectsDestruct)) || len(s.accounts) != len(s.r_accounts) || len(s.storages) != len(s.r_storages) { - panic(fmt.Sprintf("Richard: no the same len, len_r_d=%d len_d=%d len_r_a=%d len_a=%d len_r_s=%d len_s=%d", len(s.r_destructs), len(s.convertAccountSet(s.stateObjectsDestruct)), len(s.r_accounts), len(s.accounts), len(s.r_storages), len(s.storages) )) + panic(fmt.Sprintf("Richard: no the same len, len_r_d=%d len_d=%d len_r_a=%d len_a=%d len_r_s=%d len_s=%d", len(s.r_destructs), len(s.convertAccountSet(s.stateObjectsDestruct)), len(s.r_accounts), len(s.accounts), len(s.r_storages), len(s.storages))) } for addr := range s.convertAccountSet(s.stateObjectsDestruct) { if _, ok := s.r_destructs[addr]; !ok { @@ -1655,28 +1655,28 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash // if !bytes.Equal(acc, r_acc) { // panic(fmt.Sprintf("Richard: accounts mismatch for addr=%x", addr)) // } - r_acc := new(types.SlimAccount) - if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { - panic(err) - } - acc := new(types.SlimAccount) - if err := rlp.DecodeBytes(acc_d, acc); err != nil { - panic(err) - } - - if r_acc.Nonce != acc.Nonce { - log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", acc.Nonce) - } - if !(new(uint256.Int).Sub(r_acc.Balance, acc.Balance).IsZero()) { - log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", acc.Balance) - } - if !bytes.Equal(r_acc.CodeHash, acc.CodeHash) { - if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { - // log.Info("Richard: empty code hash") - } else { - log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", acc.CodeHash) - } - } + r_acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { + panic(err) + } + acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(acc_d, acc); err != nil { + panic(err) + } + + if r_acc.Nonce != acc.Nonce { + log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", acc.Nonce) + } + if !(new(uint256.Int).Sub(r_acc.Balance, acc.Balance).IsZero()) { + log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", acc.Balance) + } + if !bytes.Equal(r_acc.CodeHash, acc.CodeHash) { + if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { + // log.Info("Richard: empty code hash") + } else { + log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", acc.CodeHash) + } + } } } for addr, storage := range s.storages { @@ -1741,8 +1741,8 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash } // Clear all internal flags at the end of commit operation. 
s.r_destructs = make(map[common.Hash]struct{}) - s.r_accounts = make(map[common.Hash][]byte) - s.r_storages = make(map[common.Hash]map[common.Hash][]byte) + s.r_accounts = make(map[common.Hash][]byte) + s.r_storages = make(map[common.Hash]map[common.Hash][]byte) s.accounts = make(map[common.Hash][]byte) s.storages = make(map[common.Hash]map[common.Hash][]byte) @@ -1758,18 +1758,19 @@ func (s *StateDB) CommitUnVerifiedSnapDifflayer(deleteEmptyObjects bool) { s.Finalise(deleteEmptyObjects) s.r_destructs = s.convertAccountSet(s.stateObjectsDestruct) - s.r_accounts = make(map[common.Hash][]byte) - s.r_storages = make(map[common.Hash]map[common.Hash][]byte) - - for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; !obj.deleted { - s.r_accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - pendingstorages := obj.GetPendingStorages() - if pendingstorages != nil { - s.r_storages[obj.addrHash] = pendingstorages - } - } - } + s.r_accounts = make(map[common.Hash][]byte) + s.r_storages = make(map[common.Hash]map[common.Hash][]byte) + + for addr := range s.stateObjectsPending { + if obj := s.stateObjects[addr]; !obj.deleted { + s.r_accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) + obj.WriteCode() + pendingstorages := obj.GetPendingStorages() + if pendingstorages != nil { + s.r_storages[obj.addrHash] = pendingstorages + } + } + } if s.snap != nil { if parent := s.snap.Root(); parent != s.expectedRoot { From a4d56ad48ab859d80a29615b24f402771de7a779 Mon Sep 17 00:00:00 2001 From: Rick975 Date: Thu, 12 Dec 2024 14:58:32 +0800 Subject: [PATCH 12/13] not update state root with newly created account --- core/state/statedb.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index f927d45c2d..6aa272d746 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1096,7 +1096,9 @@ func (s *StateDB) AccountsIntermediateRoot() { updateAccountNum++ wg.Add(1) tasks <- func() { - obj.data.Root = s.GetLatestVerifiedStateRoot(obj.addrHash) + if _, ok := s.r_destructs[obj.addrHash]; !ok { + obj.data.Root = s.GetLatestVerifiedStateRoot(obj.addrHash) + } obj.updateRoot() // Cache the data until commit. 
Note, this update mechanism is not symmetric From 0217ae887278684c10ddb951085e8af657af441b Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Tue, 10 Dec 2024 19:37:54 +0800 Subject: [PATCH 13/13] fix metrics & remove log --- core/blockchain.go | 24 +++-- core/state/state_object.go | 47 +++++---- core/state/statedb.go | 199 +++++++++++++++++++------------------ 3 files changed, 143 insertions(+), 127 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index fdc4b7a2a1..87aeaa33f7 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -388,7 +388,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis diffQueue: prque.New[int64, *types.DiffLayer](nil), diffQueueBuffer: make(chan *types.DiffLayer), // verifyTaskCh: make(chan *VerifyTask, 1024), - verifyTaskCh: make(chan *VerifyTask, 1), + verifyTaskCh: make(chan *VerifyTask, 128), } bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit)) bc.forker = NewForkChoice(bc, shouldPreserve) @@ -2248,7 +2248,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } - + start := time.Now() statedb, err := state.NewWithSharedPool(parent.Root, bc.stateCache, bc.snaps) if err != nil { return it.index, err @@ -2269,12 +2269,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) // 2.do trie prefetch for MPT trie node cache // it is for the big state trie tree, prefetch based on transaction's From/To address. // trie prefetcher is thread safe now, ok to prefetch in a separate routine - // go throwaway.TriePrefetchInAdvance(block, signer) + // go throwaway.TriePrefetchInAdvance(block, signer) } // Process block using the parent state as reference point statedb.SetExpectedStateRoot(block.Root()) - // pstart := time.Now() + pstart := time.Now() statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig) close(interruptCh) // state prefetch can be stopped if err != nil { @@ -2282,10 +2282,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) statedb.StopPrefetcher() return it.index, err } -// ptime := time.Since(pstart) + ptime := time.Since(pstart) statedb.CommitUnVerifiedSnapDifflayer(bc.chainConfig.IsEIP158(block.Number())) - log.Info("Richard: successfully process", " block=", block.Number()) + //log.Info("Richard: successfully process", " block=", block.Number()) + blockExecutionTimer.Update(ptime) // Add to cache bc.blockCache.Add(block.Hash(), block) bc.hc.numberCache.Add(block.Hash(), block.NumberU64()) @@ -2297,7 +2298,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) bc.hc.tdCache.Add(block.Hash(), externTd) -// vstart := time.Now() + // vstart := time.Now() task := &VerifyTask{ block: block, state: statedb, @@ -2306,7 +2307,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) logs: logs, } bc.verifyTaskCh <- task -/* + blockInsertTimer.UpdateSince(start) + /*\\ // Validate the state using the default validator //vstart := time.Now() //if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { @@ -2408,7 +2410,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) // "root", block.Root()) //} //bc.chainBlockFeed.Send(ChainHeadEvent{block}) -*/ + */ } // Any blocks remaining here? 
The only ones we care about are the future ones @@ -2448,15 +2450,19 @@ func (bc *BlockChain) VerifyLoop() { case <-bc.quit: return case task := <-bc.verifyTaskCh: + vstart := time.Now() if err := bc.validator.ValidateState(task.block, task.state, task.receipts, task.usedGas); err != nil { log.Crit("validate state failed", "error", err) } + blockValidationTimer.UpdateSince(vstart) + cstart := time.Now() if err := bc.commitState(task.block, task.receipts, task.state); err != nil { log.Crit("commit state failed", "error", err) } if _, err := bc.writeBlockAndSetHead(task.block, task.receipts, task.logs, task.state, false); err != nil { log.Crit("write block and set head failed", "error", err) } + blockWriteTimer.UpdateSince(cstart) bc.chainBlockFeed.Send(ChainHeadEvent{task.block}) log.Info("Richard: successfully verify", "block=", task.block.Number()) } diff --git a/core/state/state_object.go b/core/state/state_object.go index ee9ec51ed3..74937a9e85 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -19,15 +19,15 @@ package state import ( "bytes" "fmt" - "github.com/ethereum/go-ethereum/core/rawdb" "io" "sync" "time" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" @@ -164,7 +164,7 @@ func (s *stateObject) getTrie() (Trie, error) { if err != nil { return nil, err } - log.Info("richard: open ca trie for addr", "state_root=", s.db.originalRoot, " addr=", s.address, "t_root=", s.data.Root) + // log.Info("richard: open ca trie for addr", "state_root=", s.db.originalRoot, " addr=", s.address, "t_root=", s.data.Root) s.trie = tr // } } @@ -357,25 +357,25 @@ func (s *stateObject) updateTrie() (Trie, error) { wg.Add(1) go func() { defer wg.Done() - log.Info("richard: before update trie", "addr=", s.address, " root=", s.trie.Hash()) + // log.Info("richard: before update trie", "addr=", s.address, " root=", s.trie.Hash()) for key, value := range dirtyStorage { if len(value) == 0 { if err := tr.DeleteStorage(s.address, key[:]); err != nil { s.db.setError(err) } s.db.StorageDeleted += 1 - log.Info("richard: delete key", "key=", key) + // log.Info("richard: delete key", "key=", key) } else { if err := tr.UpdateStorage(s.address, key[:], value); err != nil { s.db.setError(err) } s.db.StorageUpdated += 1 - log.Info("richard: update kv", "key=", key, " val=", value) + // log.Info("richard: update kv", "key=", key, " val=", value) } // Cache the items for preloading usedStorage = append(usedStorage, common.CopyBytes(key[:])) } - log.Info("richard: after update trie", "addr=", s.address, " root=", s.trie.Hash()) + // log.Info("richard: after update trie", "addr=", s.address, " root=", s.trie.Hash()) }() // If state snapshotting is active, cache the data til commit wg.Add(1) @@ -419,24 +419,27 @@ func (s *stateObject) updateTrie() (Trie, error) { } } - // compare the storge - if r_storage, ok := s.db.r_storages[s.addrHash]; !ok && len(storage) > 0 { - panic(fmt.Sprintf("Richard: state object, can't find the changed storages for addr=%x", s.addrHash)) - } else { - if len(storage) != len(r_storage) { - panic(fmt.Sprintf("Richard: state obj changes not the same len, len_r=%d len=%d", len(r_storage), len(storage))) - } - for k, v := range storage { - if r_v, ok := r_storage[k]; !ok { - 
panic(fmt.Sprintf("Richard: not found k=%x for addr=%x", k, s.addrHash)) - } else { - if !bytes.Equal(v, r_v) { - panic(fmt.Sprintf("Richard: mismatch value(r_v=%x v=%x) for key=%x addr=%x", r_v, v, k, s.addrHash)) + /* + // compare the storge + if r_storage, ok := s.db.r_storages[s.addrHash]; !ok && len(storage)>0 { + panic(fmt.Sprintf("Richard: state object, can't find the changed storages for addr=%x", s.addrHash)) + } else { + if len(storage) != len(r_storage) { + panic(fmt.Sprintf("Richard: state obj changes not the same len, len_r=%d len=%d", len(r_storage), len(storage))) + } + for k,v := range storage { + if r_v, ok := r_storage[k]; !ok { + panic(fmt.Sprintf("Richard: not found k=%x for addr=%x", k, s.addrHash)) + } else { + if !bytes.Equal(v, r_v) { + panic(fmt.Sprintf("Richard: mismatch value(r_v=%x v=%x) for key=%x addr=%x", r_v, v, k, s.addrHash)) + } } } + // log.Info("Richard: the same state changes for addr", "addr=", s.addrHash) } - // log.Info("Richard: the same state changes for addr", "addr=", s.addrHash) - } + + */ }() wg.Wait() diff --git a/core/state/statedb.go b/core/state/statedb.go index 6aa272d746..987a24e1da 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,7 +18,6 @@ package state import ( - "bytes" "errors" "fmt" "runtime" @@ -36,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" @@ -1147,7 +1145,7 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { accountNum := 0 destructNum := 0 if !s.noTrie { - log.Info("richard: start to state intermediate root") + //log.Info("richard: start to state intermediate root") // if len(s.stateObjectsPending) > 0 && len(s.stateObjectsPending) != len(s.r_accounts) { // panic(fmt.Sprintf("Richard: not the same len, pend_len= %d r_acc_len=%d", len(s.stateObjectsPending), len(s.r_accounts))) // } @@ -1156,52 +1154,60 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { // log.Info("Richard: delete", " addr=", addr) s.deleteStateObject(obj) destructNum = destructNum + 1 - log.Info("richard: delete obj", "addr", addr) - if _, exist := s.r_destructs[crypto.Keccak256Hash(addr[:])]; !exist { - panic(fmt.Sprintf("failed to find destruct account %x", addr)) - } + /* + log.Info("richard: delete obj", "addr", addr) + if _, exist := s.r_destructs[crypto.Keccak256Hash(addr[:])]; !exist { + panic(fmt.Sprintf("failed to find destruct account %x", addr)) + } + + */ } else { // log.Info("Richard: update", " addr=", addr) s.updateStateObject(obj) accountNum = accountNum + 1 - log.Info("richard: update obj", "addr", addr, "obj=", obj.data) - if r_acc_d, exist := s.accounts[crypto.Keccak256Hash(addr[:])]; !exist { - panic(fmt.Sprintf("failed to find account %x", addr)) - } else { - r_acc := new(types.SlimAccount) - if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { - panic(err) - } - acc := new(types.SlimAccount) - if err := rlp.DecodeBytes(r_acc_d, acc); err != nil { - panic(err) - } + // log.Info("richard: update obj", "addr", addr, "obj=", obj.data) + /* + if r_acc_d, exist := s.accounts[crypto.Keccak256Hash(addr[:])]; !exist { + panic(fmt.Sprintf("failed to find account %x", addr)) + } else { + r_acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { + panic(err) + } + acc := new(types.SlimAccount) + if err := 
rlp.DecodeBytes(r_acc_d, acc); err != nil { + panic(err) + } - if r_acc.Nonce != obj.data.Nonce { - log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", obj.data.Nonce) - } - if !(new(uint256.Int).Sub(r_acc.Balance, obj.data.Balance).IsZero()) { - log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", obj.data.Balance) - } - if !bytes.Equal(r_acc.CodeHash, obj.data.CodeHash) { - if bytes.Equal(obj.data.CodeHash, types.EmptyCodeHash[:]) { - // log.Warn("Richard: empty code hash") - } else { - log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", obj.data.CodeHash) + if r_acc.Nonce != obj.data.Nonce { + log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", obj.data.Nonce) + } + if !(new(uint256.Int).Sub(r_acc.Balance, obj.data.Balance).IsZero()) { + log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", obj.data.Balance) + } + if !bytes.Equal(r_acc.CodeHash, obj.data.CodeHash) { + if bytes.Equal(obj.data.CodeHash, types.EmptyCodeHash[:]) { + // log.Warn("Richard: empty code hash") + } else { + log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", obj.data.CodeHash) + } } } - } + + */ } usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure } - log.Info("richard: finish state intermediate root") + //log.Info("richard: finish state intermediate root") if prefetcher != nil { prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } + /* + if (len(s.r_destructs) != destructNum || len(s.r_accounts) != accountNum) && len(s.stateObjectsPending) != 0 { + log.Info("Richard:", "r_de_len=", len(s.r_destructs), " destructNum=", destructNum, " r_ac_len=", len(s.r_accounts), " accountsN=", accountNum) + } - if (len(s.r_destructs) != destructNum || len(s.r_accounts) != accountNum) && len(s.stateObjectsPending) != 0 { - log.Info("Richard:", "r_de_len=", len(s.r_destructs), " destructNum=", destructNum, " r_ac_len=", len(s.r_accounts), " accountsN=", accountNum) - } + */ } if len(s.stateObjectsPending) > 0 { @@ -1214,7 +1220,7 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { if s.noTrie { return s.expectedRoot } else { - log.Info("richard: state root", "state_root=", s.trie.Hash()) + // log.Info("richard: state root", "state_root=", s.trie.Hash()) return s.trie.Hash() } } @@ -1456,7 +1462,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A // The associated block number of the state transition is also provided // for more chain context. func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash, *types.DiffLayer, error) { - log.Info("richard: commit block", "number=", block) + // log.Info("richard: commit block", "number=", block) // log.Info("Richard: start to commit state", "block=", block) // Short circuit in case any database failure occurred earlier. 
if s.dbErr != nil { @@ -1641,62 +1647,65 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash if parent := s.snap.Root(); parent != s.expectedRoot { // log.Info("Richard: start to update and verify the diff") err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, true) - // compare - if len(s.r_destructs) != len(s.convertAccountSet(s.stateObjectsDestruct)) || len(s.accounts) != len(s.r_accounts) || len(s.storages) != len(s.r_storages) { - panic(fmt.Sprintf("Richard: no the same len, len_r_d=%d len_d=%d len_r_a=%d len_a=%d len_r_s=%d len_s=%d", len(s.r_destructs), len(s.convertAccountSet(s.stateObjectsDestruct)), len(s.r_accounts), len(s.accounts), len(s.r_storages), len(s.storages))) - } - for addr := range s.convertAccountSet(s.stateObjectsDestruct) { - if _, ok := s.r_destructs[addr]; !ok { - panic(fmt.Sprintf("Richard: r_destructs has no addr=%x", addr)) - } - } - for addr, acc_d := range s.accounts { - if r_acc_d, ok := s.r_accounts[addr]; !ok { - panic(fmt.Sprintf("Richard: r_accounts has no addr=%x", addr)) - } else { - // if !bytes.Equal(acc, r_acc) { - // panic(fmt.Sprintf("Richard: accounts mismatch for addr=%x", addr)) - // } - r_acc := new(types.SlimAccount) - if err := rlp.DecodeBytes(r_acc_d, r_acc); err != nil { - panic(err) - } - acc := new(types.SlimAccount) - if err := rlp.DecodeBytes(acc_d, acc); err != nil { - panic(err) - } - - if r_acc.Nonce != acc.Nonce { - log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", acc.Nonce) - } - if !(new(uint256.Int).Sub(r_acc.Balance, acc.Balance).IsZero()) { - log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", acc.Balance) - } - if !bytes.Equal(r_acc.CodeHash, acc.CodeHash) { - if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { - // log.Info("Richard: empty code hash") - } else { - log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", acc.CodeHash) - } - } - } - } - for addr, storage := range s.storages { - if r_storage, ok := s.r_storages[addr]; !ok { - panic(fmt.Sprintf("Richard: r_storages has no addr=%x", addr)) - } else { - for k, v := range storage { - if r_v, ok := r_storage[k]; !ok { - panic(fmt.Sprintf("Richard: no storage for addr=%x k=%x", addr, k)) - } else { - if !bytes.Equal(v, r_v) { - panic(fmt.Sprintf("Richard: mismatch storage for addr=%x k=%x v=%x r_v=%x", addr, k, v, r_v)) - } - } - } - } - } - log.Info("Richard: commit successfully with the same created diff for block", " block=", block) + /* + // compare + if len(s.r_destructs) != len(s.convertAccountSet(s.stateObjectsDestruct)) || len(s.accounts) != len(s.r_accounts) || len(s.storages) != len(s.r_storages) { + panic(fmt.Sprintf("Richard: no the same len, len_r_d=%d len_d=%d len_r_a=%d len_a=%d len_r_s=%d len_s=%d", len(s.r_destructs), len(s.convertAccountSet(s.stateObjectsDestruct)), len(s.r_accounts), len(s.accounts), len(s.r_storages), len(s.storages) )) + } + for addr := range s.convertAccountSet(s.stateObjectsDestruct) { + if _, ok := s.r_destructs[addr]; !ok { + panic(fmt.Sprintf("Richard: r_destructs has no addr=%x", addr)) + } + } + + for addr, acc_d := range s.accounts { + if r_acc_d, ok := s.r_accounts[addr]; !ok { + panic(fmt.Sprintf("Richard: r_accounts has no addr=%x", addr)) + } else { + if !bytes.Equal(acc, r_acc) { + panic(fmt.Sprintf("Richard: accounts mismatch for addr=%x", addr)) + } + r_acc := new(types.SlimAccount) + if err := 
rlp.DecodeBytes(r_acc_d, r_acc); err != nil { + panic(err) + } + acc := new(types.SlimAccount) + if err := rlp.DecodeBytes(acc_d, acc); err != nil { + panic(err) + } + + if r_acc.Nonce != acc.Nonce { + log.Crit("Richard:", "mismatch nonce for addr=", addr, " r_n=", r_acc.Nonce, " n=", acc.Nonce) + } + if !(new(uint256.Int).Sub(r_acc.Balance, acc.Balance).IsZero()) { + log.Crit("Richard:", "mismatch balance for addr=", addr, " r_b=", r_acc.Balance, " b=", acc.Balance) + } + if !bytes.Equal(r_acc.CodeHash, acc.CodeHash) { + if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { + // log.Info("Richard: empty code hash") + } else { + log.Crit("Richard:", "mismatch codehash for addr=", addr, " r_c=", r_acc.CodeHash, " c=", acc.CodeHash) + } + } + } + } + for addr, storage := range s.storages { + if r_storage, ok := s.r_storages[addr]; !ok { + panic(fmt.Sprintf("Richard: r_storages has no addr=%x", addr)) + } else { + for k, v := range storage { + if r_v, ok := r_storage[k]; !ok { + panic(fmt.Sprintf("Richard: no storage for addr=%x k=%x", addr, k)) + } else { + if !bytes.Equal(v, r_v) { + panic(fmt.Sprintf("Richard: mismatch storage for addr=%x k=%x v=%x r_v=%x", addr, k, v, r_v)) + } + } + } + } + } + */ + // log.Info("Richard: commit successfully with the same created diff for block", " block=", block) if err != nil { log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err) @@ -1711,8 +1720,6 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash log.Warn("Failed to cap snapshot tree", "root", s.expectedRoot, "layers", s.snaps.CapLimit(), "err", err) } }() - } else { - log.Info("Richard: not to update and verify diff", "parent=", parent) } } return nil @@ -1752,7 +1759,7 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) s.stateObjectsDirty = make(map[common.Address]struct{}) s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) - log.Info("Richard: successfully commit state", "block=", block) + //log.Info("Richard: successfully commit state", "block=", block) return root, diffLayer, nil }
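Taken together, the series turns insertChain's synchronous validate-and-commit step into a producer/consumer pipeline: execution builds a VerifyTask and pushes it onto the buffered verifyTaskCh, while VerifyLoop validates the state, commits it and sets the chain head on its own goroutine, with blockExecutionTimer, blockValidationTimer and blockWriteTimer split across the two sides. Below is a toy, self-contained Go sketch of that shape; every name in it (execute, validate, commitAndWriteHead, verifyTask) is a placeholder, not the real chain API.

package main

import (
	"fmt"
	"time"
)

// verifyTask mirrors the VerifyTask introduced by the series: everything the
// asynchronous verifier needs to validate and commit an already-executed block.
type verifyTask struct {
	number  uint64
	usedGas uint64
}

// Stand-in for processor.Process plus CommitUnVerifiedSnapDifflayer.
func execute(n uint64) verifyTask { return verifyTask{number: n, usedGas: 21000} }

// Stand-in for validator.ValidateState.
func validate(t verifyTask) error { return nil }

// Stand-in for commitState plus writeBlockAndSetHead.
func commitAndWriteHead(t verifyTask) error { return nil }

func main() {
	// Buffered like bc.verifyTaskCh (128 in the final patch) so that block import
	// can run ahead of verification without blocking right away.
	taskCh := make(chan verifyTask, 128)
	done := make(chan struct{})

	// VerifyLoop analogue. The real loop selects on bc.quit as well; the toy
	// simply ranges until the channel is closed.
	go func() {
		defer close(done)
		for task := range taskCh {
			vstart := time.Now()
			if err := validate(task); err != nil {
				panic(err) // the patch uses log.Crit here
			}
			cstart := time.Now()
			if err := commitAndWriteHead(task); err != nil {
				panic(err)
			}
			fmt.Printf("block %d (gas %d): validated in %v, committed in %v\n",
				task.number, task.usedGas, cstart.Sub(vstart), time.Since(cstart))
		}
	}()

	// insertChain analogue: execute blocks and hand them off without waiting for
	// verification or commit to finish.
	for n := uint64(1); n <= 3; n++ {
		taskCh <- execute(n)
	}
	close(taskCh)
	<-done
}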