Merge branch 'v1.2.6-beta-candidate' into measure-block-propagation
manav2401 committed Mar 4, 2024
2 parents 7a61a4b + 2dd9973 commit 6368b00
Showing 49 changed files with 300 additions and 851 deletions.
1 change: 1 addition & 0 deletions .github/matic-cli-config.yml
@@ -23,3 +23,4 @@ borDockerBuildContext: "../../bor"
heimdallDockerBuildContext: "https://github.com/maticnetwork/heimdall.git#develop"
sprintSizeBlockNumber:
- '0'
devnetBorFlags: config,config,config
1 change: 0 additions & 1 deletion builder/files/genesis-mainnet-v1.json
@@ -17,7 +17,6 @@
"bor": {
"jaipurBlock": 23850000,
"delhiBlock": 38189056,
"parallelUniverseBlock": 0,
"indoreBlock": 44934656,
"stateSyncConfirmationDelay": {
"44934656": 128
2 changes: 1 addition & 1 deletion builder/files/genesis-testnet-v4.json
@@ -14,10 +14,10 @@
"berlinBlock": 13996000,
"londonBlock": 22640000,
"shanghaiBlock": 41874000,
"cancunBlock": 45648608,
"bor": {
"jaipurBlock": 22770000,
"delhiBlock": 29638656,
"parallelUniverseBlock": 0,
"indoreBlock": 37075456,
"stateSyncConfirmationDelay": {
"37075456": 128
13 changes: 12 additions & 1 deletion cmd/bootnode/main.go
@@ -15,7 +15,8 @@
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// bootnode runs a bootstrap node for the Ethereum Discovery Protocol.
package main
// Keep package as bootnode during upstream merge.
package bootnode

import (
"crypto/ecdsa"
@@ -34,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/netutil"
)

// nolint
func main() {
var (
listenAddr = flag.String("addr", ":30301", "listen address")
@@ -213,3 +215,12 @@ func doPortMapping(natm nat.Interface, ln *enode.LocalNode, addr *net.UDPAddr) *

return extaddr
}

// Implemented separate functions so that there are minimal conflicts during upstream merge
func PrintNotice(nodeKey *ecdsa.PublicKey, addr net.UDPAddr) {
printNotice(nodeKey, addr)
}

func DoPortMapping(natm nat.Interface, ln *enode.LocalNode, addr *net.UDPAddr) *net.UDPAddr {
return doPortMapping(natm, ln, addr)
}
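
Note: the package rename and the exported `PrintNotice`/`DoPortMapping` wrappers let other packages in the repo reuse these helpers while keeping the original lowercase functions untouched for easier upstream merges. A hedged sketch of a caller (the import path and key handling are assumptions, not part of this commit):

```go
package main

import (
	"net"

	"github.com/ethereum/go-ethereum/cmd/bootnode" // assumed import path; bor keeps the upstream module name
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// A throwaway key for illustration; a real bootnode loads a persisted key.
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	addr := net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 30301}

	// Prints the enode:// URL notice the same way the bootnode binary does.
	bootnode.PrintNotice(&key.PublicKey, addr)
}
```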
6 changes: 1 addition & 5 deletions cmd/evm/testdata/3/readme.md
@@ -1,6 +1,2 @@
<<<<<<< HEAD
These files examplify a transition where a transaction (executed on block 5) requests
=======
These files exemplify a transition where a transaction (executed on block 5) requests
>>>>>>> bed84606583893fdb698cc1b5058cc47b4dbd837
the blockhash for block `1`.
the blockhash for block `1`.
6 changes: 1 addition & 5 deletions cmd/evm/testdata/4/readme.md
@@ -1,7 +1,3 @@
<<<<<<< HEAD
These files examplify a transition where a transaction (executed on block 5) requests
=======
These files exemplify a transition where a transaction (executed on block 5) requests
>>>>>>> bed84606583893fdb698cc1b5058cc47b4dbd837
the blockhash for block `4`, but where the hash for that block is missing.
the blockhash for block `4`, but where the hash for that block is missing.
It's expected that executing these should cause `exit` with errorcode `4`.
20 changes: 8 additions & 12 deletions consensus/bor/bor.go
@@ -353,7 +353,7 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head
isSprintEnd := IsSprintStart(number+1, c.config.CalculateSprint(number))

// Ensure that the extra-data contains a signer list on checkpoint, but none otherwise
signersBytes := len(header.GetValidatorBytes(c.config))
signersBytes := len(header.GetValidatorBytes(c.chainConfig))

if !isSprintEnd && signersBytes != 0 {
return errExtraValidators
@@ -472,7 +472,7 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t

sort.Sort(valset.ValidatorsByAddress(newValidators))

headerVals, err := valset.ParseValidators(header.GetValidatorBytes(c.config))
headerVals, err := valset.ParseValidators(header.GetValidatorBytes(c.chainConfig))
if err != nil {
return err
}
@@ -490,7 +490,7 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t

// verify the validator list in the last sprint block
if IsSprintStart(number, c.config.CalculateSprint(number)) {
parentValidatorBytes := parent.GetValidatorBytes(c.config)
parentValidatorBytes := parent.GetValidatorBytes(c.chainConfig)
validatorsBytes := make([]byte, len(snap.ValidatorSet.Validators)*validatorHeaderBytesLength)

currentValidators := snap.ValidatorSet.Copy().Validators
@@ -521,7 +521,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
val := valset.NewValidator(signer, 1000)
validatorset := valset.NewValidatorSet([]*valset.Validator{val})

snapshot := newSnapshot(c.config, c.signatures, number, hash, validatorset.Validators)
snapshot := newSnapshot(c.chainConfig, c.signatures, number, hash, validatorset.Validators)

return snapshot, nil
}
@@ -541,7 +541,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co

// If an on-disk checkpoint snapshot can be found, use that
if number%checkpointInterval == 0 {
if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
if s, err := loadSnapshot(c.chainConfig, c.config, c.signatures, c.db, hash); err == nil {
log.Trace("Loaded snapshot from disk", "number", number, "hash", hash)

snap = s
@@ -570,7 +570,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
}

// new snap shot
snap = newSnapshot(c.config, c.signatures, number, hash, validators)
snap = newSnapshot(c.chainConfig, c.signatures, number, hash, validators)
if err := snap.store(c.db); err != nil {
return nil, err
}
@@ -742,7 +742,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e
// sort validator by address
sort.Sort(valset.ValidatorsByAddress(newValidators))

if c.config.IsParallelUniverse(header.Number) {
if c.chainConfig.IsCancun(header.Number) {
var tempValidatorBytes []byte

for _, validator := range newValidators {
@@ -766,7 +766,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e
header.Extra = append(header.Extra, validator.HeaderBytes()...)
}
}
} else if c.config.IsParallelUniverse(header.Number) {
} else if c.chainConfig.IsCancun(header.Number) {
blockExtraData := &types.BlockExtraData{
ValidatorBytes: nil,
TxDependency: nil,
@@ -883,10 +883,6 @@ func (c *Bor) changeContractCodeIfNeeded(headerNumber uint64, state *state.State
for addr, account := range allocs {
log.Info("change contract code", "address", addr)
state.SetCode(addr, account.Code)

if state.GetBalance(addr).Cmp(big.NewInt(0)) == 0 {
state.SetBalance(addr, account.Balance)
}
}
}
}
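
Note: the engine now consults the chain config's Cancun activation (`c.chainConfig.IsCancun(header.Number)`) instead of the removed Bor-specific `parallelUniverseBlock`. A minimal, hedged sketch of that check in isolation — the `CancunBlock` field name is inferred from the `cancunBlock` genesis key above and should be treated as an assumption:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Assumed field name; the value mirrors the testnet genesis shown above.
	cfg := &params.ChainConfig{CancunBlock: big.NewInt(45648608)}

	for _, n := range []int64{45648607, 45648608} {
		fmt.Printf("block %d -> IsCancun: %v\n", n, cfg.IsCancun(big.NewInt(n)))
	}
}
```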
31 changes: 7 additions & 24 deletions consensus/bor/bor_test.go
@@ -40,12 +40,6 @@ func TestGenesisContractChange(t *testing.T) {
"balance": "0x1000",
},
},
"6": map[string]interface{}{
addr0.Hex(): map[string]interface{}{
"code": hexutil.Bytes{0x1, 0x4},
"balance": "0x2000",
},
},
},
},
}
@@ -91,35 +85,24 @@

root := genesis.Root()

// code does not change, balance remains 0
// code does not change
root, statedb = addBlock(root, 1)
require.Equal(t, statedb.GetCode(addr0), []byte{0x1, 0x1})
require.Equal(t, statedb.GetBalance(addr0), big.NewInt(0))

// code changes 1st time, balance remains 0
// code changes 1st time
root, statedb = addBlock(root, 2)
require.Equal(t, statedb.GetCode(addr0), []byte{0x1, 0x2})
require.Equal(t, statedb.GetBalance(addr0), big.NewInt(0))

// code same as 1st change, balance remains 0
// code same as 1st change
root, statedb = addBlock(root, 3)
require.Equal(t, statedb.GetCode(addr0), []byte{0x1, 0x2})
require.Equal(t, statedb.GetBalance(addr0), big.NewInt(0))

// code changes 2nd time, balance updates to 4096
root, statedb = addBlock(root, 4)
require.Equal(t, statedb.GetCode(addr0), []byte{0x1, 0x3})
require.Equal(t, statedb.GetBalance(addr0), big.NewInt(4096))

// code same as 2nd change, balance remains 4096
root, statedb = addBlock(root, 5)
// code changes 2nd time
_, statedb = addBlock(root, 4)
require.Equal(t, statedb.GetCode(addr0), []byte{0x1, 0x3})
require.Equal(t, statedb.GetBalance(addr0), big.NewInt(4096))

// code changes 3rd time, balance remains 4096
_, statedb = addBlock(root, 6)
require.Equal(t, statedb.GetCode(addr0), []byte{0x1, 0x4})
require.Equal(t, statedb.GetBalance(addr0), big.NewInt(4096))
// make sure balance change DOES NOT take effect
require.Equal(t, statedb.GetBalance(addr0), big.NewInt(0))
}

func TestEncodeSigHeaderJaipur(t *testing.T) {
3 changes: 2 additions & 1 deletion consensus/bor/heimdall/client.go
@@ -8,6 +8,7 @@ import (
"io"
"net/http"
"net/url"
"path"
"sort"
"time"

@@ -420,7 +421,7 @@ func makeURL(urlString, rawPath, rawQuery string) (*url.URL, error) {
return nil, err
}

u.Path = rawPath
u.Path = path.Join(u.Path, rawPath)
u.RawQuery = rawQuery

return u, err
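
Note: `makeURL` now joins the request path onto any path already present in the configured Heimdall URL instead of overwriting it. A self-contained illustration of the difference (the base URL here is hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Hypothetical Heimdall endpoint configured with a path prefix.
	u, err := url.Parse("http://heimdall0:1317/api")
	if err != nil {
		panic(err)
	}
	rawPath := "bor/span/1"

	// Old behaviour: the "/api" prefix is lost.
	old := *u
	old.Path = rawPath
	fmt.Println(old.String()) // http://heimdall0:1317/bor/span/1

	// New behaviour: the prefix is preserved.
	u.Path = path.Join(u.Path, rawPath)
	fmt.Println(u.String()) // http://heimdall0:1317/api/bor/span/1
}
```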
4 changes: 2 additions & 2 deletions consensus/bor/heimdall/client_test.go
@@ -399,7 +399,7 @@ func TestSpanURL(t *testing.T) {
const expected = "http://bor0/bor/span/1"

if url.String() != expected {
t.Fatalf("expected URL %q, got %q", url.String(), expected)
t.Fatalf("expected URL %q, got %q", expected, url.String())
}
}

@@ -414,6 +414,6 @@ func TestStateSyncURL(t *testing.T) {
const expected = "http://bor0/clerk/event-record/list?from-id=10&to-time=100&limit=50"

if url.String() != expected {
t.Fatalf("expected URL %q, got %q", url.String(), expected)
t.Fatalf("expected URL %q, got %q", expected, url.String())
}
}
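
Note: the two test fixes only swap the `t.Fatalf` arguments so they match the format string — the expected constant fills the first `%q`, the computed URL the second. A hedged sketch of that convention as a helper (the helper itself is not part of this commit):

```go
package heimdall_test

import "testing"

// assertURL is a hypothetical helper; it keeps the expected/got order consistent.
func assertURL(t *testing.T, got, want string) {
	t.Helper()
	if got != want {
		t.Fatalf("expected URL %q, got %q", want, got)
	}
}
```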
25 changes: 13 additions & 12 deletions consensus/bor/snapshot.go
@@ -15,8 +15,9 @@ import (

// Snapshot is the state of the authorization voting at a given point in time.
type Snapshot struct {
config *params.BorConfig // Consensus engine parameters to fine tune behavior
sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover
chainConfig *params.ChainConfig

sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover

Number uint64 `json:"number"` // Block number where the snapshot was created
Hash common.Hash `json:"hash"` // Block hash where the snapshot was created
@@ -28,14 +29,14 @@ type Snapshot struct {
// method does not initialize the set of recent signers, so only ever use it for
// the genesis block.
func newSnapshot(
config *params.BorConfig,
chainConfig *params.ChainConfig,
sigcache *lru.ARCCache,
number uint64,
hash common.Hash,
validators []*valset.Validator,
) *Snapshot {
snap := &Snapshot{
config: config,
chainConfig: chainConfig,
sigcache: sigcache,
Number: number,
Hash: hash,
@@ -47,7 +48,7 @@ }
}

// loadSnapshot loads an existing snapshot from the database.
func loadSnapshot(config *params.BorConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
func loadSnapshot(chainConfig *params.ChainConfig, config *params.BorConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
blob, err := db.Get(append([]byte("bor-"), hash[:]...))
if err != nil {
return nil, err
@@ -61,7 +62,7 @@

snap.ValidatorSet.UpdateValidatorMap()

snap.config = config
snap.chainConfig = chainConfig
snap.sigcache = sigcache

// update total voting power
@@ -85,7 +86,7 @@ func (s *Snapshot) store(db ethdb.Database) error {
// copy creates a deep copy of the snapshot, though not the individual votes.
func (s *Snapshot) copy() *Snapshot {
cpy := &Snapshot{
config: s.config,
chainConfig: s.chainConfig,
sigcache: s.sigcache,
Number: s.Number,
Hash: s.Hash,
@@ -122,12 +123,12 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
number := header.Number.Uint64()

// Delete the oldest signer from the recent list to allow it signing again
if number >= s.config.CalculateSprint(number) {
delete(snap.Recents, number-s.config.CalculateSprint(number))
if number >= s.chainConfig.Bor.CalculateSprint(number) {
delete(snap.Recents, number-s.chainConfig.Bor.CalculateSprint(number))
}

// Resolve the authorization key and check against signers
signer, err := ecrecover(header, s.sigcache, s.config)
signer, err := ecrecover(header, s.sigcache, s.chainConfig.Bor)
if err != nil {
return nil, err
}
@@ -145,12 +146,12 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
snap.Recents[number] = signer

// change validator set and change proposer
if number > 0 && (number+1)%s.config.CalculateSprint(number) == 0 {
if number > 0 && (number+1)%s.chainConfig.Bor.CalculateSprint(number) == 0 {
if err := validateHeaderExtraField(header.Extra); err != nil {
return nil, err
}

validatorBytes := header.GetValidatorBytes(s.config)
validatorBytes := header.GetValidatorBytes(s.chainConfig)

// get validators from headers and use that for new validator set
newVals, _ := valset.ParseValidators(validatorBytes)
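
Note: with `Snapshot` carrying the full `*params.ChainConfig`, the engine passes both configs when loading from disk and derives sprint lengths via `chainConfig.Bor`. A fragmentary, hedged sketch of the call-site shape (variable names follow the engine code shown above):

```go
// Sketch only; c.chainConfig is *params.ChainConfig, c.config is *params.BorConfig.
snap, err := loadSnapshot(c.chainConfig, c.config, c.signatures, c.db, hash)
if err != nil {
	// Fall back to building a fresh snapshot from the known validator set.
	snap = newSnapshot(c.chainConfig, c.signatures, number, hash, validators)
}

// Sprint length now comes from the Bor section of the chain config.
sprint := c.chainConfig.Bor.CalculateSprint(number)
_ = sprint
_ = snap
```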
30 changes: 12 additions & 18 deletions core/blockchain.go
@@ -236,13 +236,14 @@ type BlockChain struct {
stopping atomic.Bool // false if chain is running, true when stopped
procInterrupt atomic.Bool // interrupt signaler for block processing

engine consensus.Engine
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
parallelProcessor Processor // Parallel block transaction processor interface
forker *ForkChoice
vmConfig vm.Config
engine consensus.Engine
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
parallelProcessor Processor // Parallel block transaction processor interface
parallelSpeculativeProcesses int // Number of parallel speculative processes
forker *ForkChoice
vmConfig vm.Config

// Bor related changes
borReceiptsCache *lru.Cache[common.Hash, *types.Receipt] // Cache for the most recent bor receipt receipts per block
@@ -408,7 +409,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// The first thing the node will do is reconstruct the verification data for
// the head block (ethash cache or clique voting snapshot). Might as well do
// it in advance.
bc.engine.VerifyHeader(bc, bc.CurrentHeader())
// BOR - commented out intentionally
// bc.engine.VerifyHeader(bc, bc.CurrentHeader())

// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash := range BadHashes {
@@ -481,7 +483,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
}

// NewParallelBlockChain , similar to NewBlockChain, creates a new blockchain object, but with a parallel state processor
func NewParallelBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64, checker ethereum.ChainValidator) (*BlockChain, error) {
func NewParallelBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64, checker ethereum.ChainValidator, numprocs int) (*BlockChain, error) {
bc, err := NewBlockChain(db, cacheConfig, genesis, overrides, engine, vmConfig, shouldPreserve, txLookupLimit, checker)

if err != nil {
@@ -500,6 +502,7 @@ func NewParallelBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis
}

bc.parallelProcessor = NewParallelStateProcessor(chainConfig, bc, engine)
bc.parallelSpeculativeProcesses = numprocs

return bc, nil
}
@@ -2139,15 +2142,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}

statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
if err != nil {
return it.index, err
}

// Enable prefetching to pull in trie node paths while processing transactions
statedb.StartPrefetcher("chain")
activeState = statedb

// If we have a followup block, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes.
var followupInterrupt atomic.Bool
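
Note: `NewParallelBlockChain` gains a trailing `numprocs` argument, stored as `parallelSpeculativeProcesses`. A hedged sketch of a call site (the surrounding setup is assumed; values are placeholders, not taken from this commit):

```go
// Sketch only: db, genesis and engine are assumed to exist in the caller.
chain, err := core.NewParallelBlockChain(
	db, nil, genesis, nil, // database, cache config, genesis, chain overrides
	engine, vm.Config{}, nil, nil, // consensus engine, VM config, shouldPreserve, tx lookup limit
	nil, // chain validator (checker)
	8,   // number of parallel speculative processes
)
if err != nil {
	panic(err)
}
defer chain.Stop()
```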