Skip to content

Commit

Permalink
Merge pull request #1139 from maticnetwork/v1.2.3-candidate
Browse files Browse the repository at this point in the history
v1.2.3
  • Loading branch information
manav2401 authored Jan 23, 2024
2 parents dac1a42 + f1e0b1d commit 241af1f
Show file tree
Hide file tree
Showing 34 changed files with 352 additions and 192 deletions.
1 change: 1 addition & 0 deletions .github/matic-cli-config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,3 +23,4 @@ borDockerBuildContext: "../../bor"
heimdallDockerBuildContext: "https://github.com/maticnetwork/heimdall.git#develop"
sprintSizeBlockNumber:
- '0'
devnetBorFlags: config,config,config
1 change: 0 additions & 1 deletion builder/files/genesis-mainnet-v1.json
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
"bor": {
"jaipurBlock": 23850000,
"delhiBlock": 38189056,
"parallelUniverseBlock": 0,
"indoreBlock": 44934656,
"stateSyncConfirmationDelay": {
"44934656": 128
Expand Down
1 change: 0 additions & 1 deletion builder/files/genesis-testnet-v4.json
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
"bor": {
"jaipurBlock": 22770000,
"delhiBlock": 29638656,
"parallelUniverseBlock": 0,
"indoreBlock": 37075456,
"stateSyncConfirmationDelay": {
"37075456": 128
Expand Down
16 changes: 8 additions & 8 deletions consensus/bor/bor.go
Original file line number Diff line number Diff line change
Expand Up @@ -353,7 +353,7 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head
isSprintEnd := IsSprintStart(number+1, c.config.CalculateSprint(number))

// Ensure that the extra-data contains a signer list on checkpoint, but none otherwise
signersBytes := len(header.GetValidatorBytes(c.config))
signersBytes := len(header.GetValidatorBytes(c.chainConfig))

if !isSprintEnd && signersBytes != 0 {
return errExtraValidators
Expand Down Expand Up @@ -472,7 +472,7 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t

sort.Sort(valset.ValidatorsByAddress(newValidators))

headerVals, err := valset.ParseValidators(header.GetValidatorBytes(c.config))
headerVals, err := valset.ParseValidators(header.GetValidatorBytes(c.chainConfig))
if err != nil {
return err
}
Expand All @@ -490,7 +490,7 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t

// verify the validator list in the last sprint block
if IsSprintStart(number, c.config.CalculateSprint(number)) {
parentValidatorBytes := parent.GetValidatorBytes(c.config)
parentValidatorBytes := parent.GetValidatorBytes(c.chainConfig)
validatorsBytes := make([]byte, len(snap.ValidatorSet.Validators)*validatorHeaderBytesLength)

currentValidators := snap.ValidatorSet.Copy().Validators
Expand Down Expand Up @@ -521,7 +521,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
val := valset.NewValidator(signer, 1000)
validatorset := valset.NewValidatorSet([]*valset.Validator{val})

snapshot := newSnapshot(c.config, c.signatures, number, hash, validatorset.Validators)
snapshot := newSnapshot(c.chainConfig, c.signatures, number, hash, validatorset.Validators)

return snapshot, nil
}
Expand All @@ -541,7 +541,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co

// If an on-disk checkpoint snapshot can be found, use that
if number%checkpointInterval == 0 {
if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
if s, err := loadSnapshot(c.chainConfig, c.config, c.signatures, c.db, hash); err == nil {
log.Trace("Loaded snapshot from disk", "number", number, "hash", hash)

snap = s
Expand Down Expand Up @@ -570,7 +570,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
}

// new snap shot
snap = newSnapshot(c.config, c.signatures, number, hash, validators)
snap = newSnapshot(c.chainConfig, c.signatures, number, hash, validators)
if err := snap.store(c.db); err != nil {
return nil, err
}
Expand Down Expand Up @@ -742,7 +742,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e
// sort validator by address
sort.Sort(valset.ValidatorsByAddress(newValidators))

if c.config.IsParallelUniverse(header.Number) {
if c.chainConfig.IsCancun(header.Number) {
var tempValidatorBytes []byte

for _, validator := range newValidators {
Expand All @@ -766,7 +766,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e
header.Extra = append(header.Extra, validator.HeaderBytes()...)
}
}
} else if c.config.IsParallelUniverse(header.Number) {
} else if c.chainConfig.IsCancun(header.Number) {
blockExtraData := &types.BlockExtraData{
ValidatorBytes: nil,
TxDependency: nil,
Expand Down
25 changes: 13 additions & 12 deletions consensus/bor/snapshot.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,9 @@ import (

// Snapshot is the state of the authorization voting at a given point in time.
type Snapshot struct {
config *params.BorConfig // Consensus engine parameters to fine tune behavior
sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover
chainConfig *params.ChainConfig

sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover

Number uint64 `json:"number"` // Block number where the snapshot was created
Hash common.Hash `json:"hash"` // Block hash where the snapshot was created
Expand All @@ -28,14 +29,14 @@ type Snapshot struct {
// method does not initialize the set of recent signers, so only ever use if for
// the genesis block.
func newSnapshot(
config *params.BorConfig,
chainConfig *params.ChainConfig,
sigcache *lru.ARCCache,
number uint64,
hash common.Hash,
validators []*valset.Validator,
) *Snapshot {
snap := &Snapshot{
config: config,
chainConfig: chainConfig,
sigcache: sigcache,
Number: number,
Hash: hash,
Expand All @@ -47,7 +48,7 @@ func newSnapshot(
}

// loadSnapshot loads an existing snapshot from the database.
func loadSnapshot(config *params.BorConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
func loadSnapshot(chainConfig *params.ChainConfig, config *params.BorConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
blob, err := db.Get(append([]byte("bor-"), hash[:]...))
if err != nil {
return nil, err
Expand All @@ -61,7 +62,7 @@ func loadSnapshot(config *params.BorConfig, sigcache *lru.ARCCache, db ethdb.Dat

snap.ValidatorSet.UpdateValidatorMap()

snap.config = config
snap.chainConfig = chainConfig
snap.sigcache = sigcache

// update total voting power
Expand All @@ -85,7 +86,7 @@ func (s *Snapshot) store(db ethdb.Database) error {
// copy creates a deep copy of the snapshot, though not the individual votes.
func (s *Snapshot) copy() *Snapshot {
cpy := &Snapshot{
config: s.config,
chainConfig: s.chainConfig,
sigcache: s.sigcache,
Number: s.Number,
Hash: s.Hash,
Expand Down Expand Up @@ -122,12 +123,12 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
number := header.Number.Uint64()

// Delete the oldest signer from the recent list to allow it signing again
if number >= s.config.CalculateSprint(number) {
delete(snap.Recents, number-s.config.CalculateSprint(number))
if number >= s.chainConfig.Bor.CalculateSprint(number) {
delete(snap.Recents, number-s.chainConfig.Bor.CalculateSprint(number))
}

// Resolve the authorization key and check against signers
signer, err := ecrecover(header, s.sigcache, s.config)
signer, err := ecrecover(header, s.sigcache, s.chainConfig.Bor)
if err != nil {
return nil, err
}
Expand All @@ -145,12 +146,12 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
snap.Recents[number] = signer

// change validator set and change proposer
if number > 0 && (number+1)%s.config.CalculateSprint(number) == 0 {
if number > 0 && (number+1)%s.chainConfig.Bor.CalculateSprint(number) == 0 {
if err := validateHeaderExtraField(header.Extra); err != nil {
return nil, err
}

validatorBytes := header.GetValidatorBytes(s.config)
validatorBytes := header.GetValidatorBytes(s.chainConfig)

// get validators from headers and use that for new validator set
newVals, _ := valset.ParseValidators(validatorBytes)
Expand Down
37 changes: 14 additions & 23 deletions core/blockstm/mvhashmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -121,34 +121,23 @@ func (mv *MVHashMap) Write(k Key, v Version, data interface{}) {
return
})

cells.rw.RLock()
ci, ok := cells.tm.Get(v.TxnIndex)
cells.rw.RUnlock()

if ok {
cells.rw.Lock()
if ci, ok := cells.tm.Get(v.TxnIndex); !ok {
cells.tm.Put(v.TxnIndex, &WriteCell{
flag: FlagDone,
incarnation: v.Incarnation,
data: data,
})
} else {
if ci.(*WriteCell).incarnation > v.Incarnation {
panic(fmt.Errorf("existing transaction value does not have lower incarnation: %v, %v",
k, v.TxnIndex))
}

ci.(*WriteCell).flag = FlagDone
ci.(*WriteCell).incarnation = v.Incarnation
ci.(*WriteCell).data = data
} else {
cells.rw.Lock()
if ci, ok = cells.tm.Get(v.TxnIndex); !ok {
cells.tm.Put(v.TxnIndex, &WriteCell{
flag: FlagDone,
incarnation: v.Incarnation,
data: data,
})
} else {
ci.(*WriteCell).flag = FlagDone
ci.(*WriteCell).incarnation = v.Incarnation
ci.(*WriteCell).data = data
}
cells.rw.Unlock()
}
cells.rw.Unlock()
}

func (mv *MVHashMap) ReadStorage(k Key, fallBack func() any) any {
Expand All @@ -166,13 +155,13 @@ func (mv *MVHashMap) MarkEstimate(k Key, txIdx int) {
panic(fmt.Errorf("path must already exist"))
})

cells.rw.RLock()
cells.rw.Lock()
if ci, ok := cells.tm.Get(txIdx); !ok {
panic(fmt.Sprintf("should not happen - cell should be present for path. TxIdx: %v, path, %x, cells keys: %v", txIdx, k, cells.tm.Keys()))
} else {
ci.(*WriteCell).flag = FlagEstimate
}
cells.rw.RUnlock()
cells.rw.Unlock()
}

func (mv *MVHashMap) Delete(k Key, txIdx int) {
Expand Down Expand Up @@ -233,8 +222,8 @@ func (mv *MVHashMap) Read(k Key, txIdx int) (res MVReadResult) {
}

cells.rw.RLock()

fk, fv := cells.tm.Floor(txIdx - 1)
cells.rw.RUnlock()

if fk != nil && fv != nil {
c := fv.(*WriteCell)
Expand All @@ -253,6 +242,8 @@ func (mv *MVHashMap) Read(k Key, txIdx int) (res MVReadResult) {
}
}

cells.rw.RUnlock()

return
}

Expand Down
96 changes: 46 additions & 50 deletions core/parallel_state_processor.go
Original file line number Diff line number Diff line change
Expand Up @@ -288,6 +288,11 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat

deps := GetDeps(blockTxDependency)

if !VerifyDeps(deps) || len(blockTxDependency) != len(block.Transactions()) {
blockTxDependency = nil
deps = make(map[int][]int)
}

if blockTxDependency != nil {
metadata = true
}
Expand All @@ -308,57 +313,30 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat
shouldDelayFeeCal = false
}

if len(blockTxDependency) != len(block.Transactions()) {
task := &ExecutionTask{
msg: *msg,
config: p.config,
gasLimit: block.GasLimit(),
blockNumber: blockNumber,
blockHash: blockHash,
tx: tx,
index: i,
cleanStateDB: cleansdb,
finalStateDB: statedb,
blockChain: p.bc,
header: header,
evmConfig: cfg,
shouldDelayFeeCal: &shouldDelayFeeCal,
sender: msg.From,
totalUsedGas: usedGas,
receipts: &receipts,
allLogs: &allLogs,
dependencies: deps[i],
coinbase: coinbase,
blockContext: blockContext,
}

tasks = append(tasks, task)
} else {
task := &ExecutionTask{
msg: *msg,
config: p.config,
gasLimit: block.GasLimit(),
blockNumber: blockNumber,
blockHash: blockHash,
tx: tx,
index: i,
cleanStateDB: cleansdb,
finalStateDB: statedb,
blockChain: p.bc,
header: header,
evmConfig: cfg,
shouldDelayFeeCal: &shouldDelayFeeCal,
sender: msg.From,
totalUsedGas: usedGas,
receipts: &receipts,
allLogs: &allLogs,
dependencies: nil,
coinbase: coinbase,
blockContext: blockContext,
}

tasks = append(tasks, task)
task := &ExecutionTask{
msg: *msg,
config: p.config,
gasLimit: block.GasLimit(),
blockNumber: blockNumber,
blockHash: blockHash,
tx: tx,
index: i,
cleanStateDB: cleansdb,
finalStateDB: statedb,
blockChain: p.bc,
header: header,
evmConfig: cfg,
shouldDelayFeeCal: &shouldDelayFeeCal,
sender: msg.From,
totalUsedGas: usedGas,
receipts: &receipts,
allLogs: &allLogs,
dependencies: deps[i],
coinbase: coinbase,
blockContext: blockContext,
}

tasks = append(tasks, task)
}

backupStateDB := statedb.Copy()
Expand Down Expand Up @@ -427,3 +405,21 @@ func GetDeps(txDependency [][]uint64) map[int][]int {

return deps
}

// VerifyDeps reports whether the given transaction-dependency map is
// well-formed: every transaction may only depend on transactions with a
// strictly smaller index. It assumes deps is keyed by transaction index
// 0..len(deps)-1 (a missing key simply yields a nil, dependency-free slice).
// Both out-of-range (depTx >= n) and non-backward (depTx >= i) references —
// which would make parallel execution circular or refer past the block's
// transaction list — cause a false result.
func VerifyDeps(deps map[int][]int) bool {
	// number of transactions in the block
	n := len(deps)

	for i := 0; i < n; i++ {
		for _, depTx := range deps[i] {
			// depTx >= i already implies depTx >= n is impossible to miss
			// for i in [0, n), but both checks are kept explicit so the
			// intent (range check + strict back-reference) reads clearly.
			if depTx >= n || depTx >= i {
				return false
			}
		}
	}

	return true
}
Loading

0 comments on commit 241af1f

Please sign in to comment.