This repository has been archived by the owner on Dec 4, 2024. It is now read-only.

Drop and restart validators - property test
stana-miric committed Oct 20, 2023
1 parent 5515483 commit 5f7c08f
Showing 2 changed files with 88 additions and 8 deletions.
16 changes: 10 additions & 6 deletions consensus/polybft/proposer_calculator.go
@@ -25,6 +25,11 @@ type PrioritizedValidator struct {
ProposerPriority *big.Int
}

func (pv PrioritizedValidator) String() string {
return fmt.Sprintf("[%v, voting power %v, priority %v]", pv.Metadata.Address.String(),
pv.Metadata.VotingPower, pv.ProposerPriority)
}

// ProposerSnapshot represents snapshot of one proposer calculation
type ProposerSnapshot struct {
Height uint64
@@ -211,9 +216,7 @@ func (pc *ProposerCalculator) GetSnapshot() (*ProposerSnapshot, bool) {
// PostBlock is called on every insert of finalized block (either from consensus or syncer)
// It will update priorities and save the updated snapshot to db
func (pc *ProposerCalculator) PostBlock(req *PostBlockRequest) error {
blockNumber := req.FullBlock.Block.Number()

return pc.update(blockNumber, req.DBTx)
return pc.update(req.FullBlock.Block.Number(), req.DBTx)
}

func (pc *ProposerCalculator) update(blockNumber uint64, dbTx *bolt.Tx) error {
@@ -229,15 +232,16 @@ func (pc *ProposerCalculator) update(blockNumber uint64, dbTx *bolt.Tx) error {
return err
}

pc.logger.Debug("Proposers snapshot has been updated", "current block", blockNumber+1,
"validators count", len(pc.snapshot.Validators))
pc.logger.Debug("Proposer snapshot has been updated",
"block", height, "validators", pc.snapshot.Validators)
}

if err := pc.state.ProposerSnapshotStore.writeProposerSnapshot(pc.snapshot, dbTx); err != nil {
return fmt.Errorf("cannot save proposers snapshot for block %d: %w", blockNumber, err)
}

pc.logger.Debug("Update proposers snapshot finished", "target block", blockNumber)
pc.logger.Info("Proposer snapshot update has been finished",
"target block", blockNumber+1, "validators", len(pc.snapshot.Validators))

return nil
}
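For context on the String() method added above: it mainly exists so that logging the whole validators slice produces readable output. Below is a minimal, self-contained sketch with simplified stand-in types (the field names and address type are assumptions, not the actual polybft definitions) showing how fmt-style %v formatting picks up the Stringer implementation for each slice element:

package main

import (
	"fmt"
	"math/big"
)

// Simplified stand-in for the polybft type above; the real Metadata field
// and address type are replaced by a plain string for illustration.
type PrioritizedValidator struct {
	Address          string
	VotingPower      *big.Int
	ProposerPriority *big.Int
}

// Value receiver, so both PrioritizedValidator and *PrioritizedValidator
// satisfy fmt.Stringer.
func (pv PrioritizedValidator) String() string {
	return fmt.Sprintf("[%v, voting power %v, priority %v]",
		pv.Address, pv.VotingPower, pv.ProposerPriority)
}

func main() {
	validators := []*PrioritizedValidator{
		{Address: "0xA1", VotingPower: big.NewInt(20), ProposerPriority: big.NewInt(-5)},
		{Address: "0xB2", VotingPower: big.NewInt(20), ProposerPriority: big.NewInt(15)},
	}

	// %v calls String() on each element, so a log argument such as the
	// snapshot's validators slice renders as readable entries rather than
	// raw struct dumps.
	fmt.Printf("validators %v\n", validators)
	// Output: validators [[0xA1, voting power 20, priority -5] [0xB2, voting power 20, priority 15]]
}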
80 changes: 78 additions & 2 deletions e2e-polybft/property/property_test.go
@@ -2,13 +2,14 @@ package property

import (
"fmt"
"math"
"math/big"
"path/filepath"
"sync"
"testing"
"time"

"github.com/stretchr/testify/require"
"github.com/umbracle/ethgo"
"pgregory.net/rapid"

"github.com/0xPolygon/polygon-edge/e2e-polybft/framework"
@@ -20,7 +21,7 @@ func TestProperty_DifferentVotingPower(t *testing.T) {

const (
blockTime = time.Second * 6
maxStake = math.MaxUint64
maxStake = 20
)

rapid.Check(t, func(tt *rapid.T) {
@@ -55,3 +56,78 @@ func TestProperty_DifferentVotingPower(t *testing.T) {
require.NoError(t, cluster.WaitForBlock(numBlocks, blockTime*time.Duration(numBlocks)))
})
}

func TestProperty_DropValidators(t *testing.T) {
t.Parallel()

const (
blockTime = time.Second * 4
)

rapid.Check(t, func(tt *rapid.T) {
var (
numNodes = rapid.Uint64Range(5, 8).Draw(tt, "number of cluster nodes")
epochSize = rapid.OneOf(rapid.Just(4), rapid.Just(10)).Draw(tt, "epoch size")
)

cluster := framework.NewPropertyTestCluster(t, int(numNodes),
framework.WithEpochSize(epochSize),
framework.WithSecretsCallback(func(addresses []types.Address, config *framework.TestClusterConfig) {
for range addresses {
config.StakeAmounts = append(config.StakeAmounts, big.NewInt(20))
}
}))
defer cluster.Stop()

t.Logf("Test %v, run with %d nodes, epoch size: %d",
filepath.Base(cluster.Config.LogsDir), numNodes, epochSize)

cluster.WaitForReady(t)

// stop first validator, block production should continue
cluster.Servers[0].Stop()
activeValidator := cluster.Servers[numNodes-1]
currentBlock, err := activeValidator.JSONRPC().Eth().GetBlockByNumber(ethgo.Latest, false)
require.NoError(t, err)
require.NoError(t, cluster.WaitForBlock(currentBlock.Number+1, 2*blockTime))

// drop all validator nodes, leaving one node alive
numNodesToDrop := int(numNodes - 1)

var wg sync.WaitGroup
// drop bulk of nodes from cluster
for i := 1; i < numNodesToDrop; i++ {
node := cluster.Servers[i]

wg.Add(1)

go func(node *framework.TestServer) {
defer wg.Done()
node.Stop()
}(node)
}

wg.Wait()

// check that block production is stopped
currentBlock, err = activeValidator.JSONRPC().Eth().GetBlockByNumber(ethgo.Latest, false)
require.NoError(t, err)
oldBlockNumber := currentBlock.Number
time.Sleep(2 * blockTime)
currentBlock, err = activeValidator.JSONRPC().Eth().GetBlockByNumber(ethgo.Latest, false)
require.NoError(t, err)
require.Equal(t, oldBlockNumber, currentBlock.Number)

// start dropped nodes again
for i := 0; i < numNodesToDrop; i++ {
node := cluster.Servers[i]
node.Start()
}

time.Sleep(2 * blockTime)
currentBlock, err = activeValidator.JSONRPC().Eth().GetBlockByNumber(ethgo.Latest, false)
require.NoError(t, err)
// check that block production is restarted
require.True(t, oldBlockNumber < currentBlock.Number)
})
}
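As a reference for the property-based structure used in TestProperty_DropValidators, here is a minimal, self-contained sketch of the pgregory.net/rapid pattern: rapid.Check re-runs the property with freshly drawn inputs, so cluster size and epoch size vary between runs. The test name and the range checks below are illustrative only and are not part of this commit.

package property

import (
	"testing"

	"pgregory.net/rapid"
)

// Hypothetical test name, used only to demonstrate the generator API.
func TestProperty_GeneratorsOnly(t *testing.T) {
	rapid.Check(t, func(tt *rapid.T) {
		numNodes := rapid.Uint64Range(5, 8).Draw(tt, "number of cluster nodes")
		epochSize := rapid.OneOf(rapid.Just(4), rapid.Just(10)).Draw(tt, "epoch size")

		// The real test builds a cluster here; this sketch only checks that
		// the drawn values stay inside the expected ranges.
		if numNodes < 5 || numNodes > 8 {
			tt.Fatalf("numNodes out of range: %d", numNodes)
		}
		if epochSize != 4 && epochSize != 10 {
			tt.Fatalf("unexpected epoch size: %d", epochSize)
		}
	})
}

Rapid also exposes command-line flags (for example -rapid.checks) to control how many cases each property is exercised with, which is useful when a single case spins up a whole test cluster as above.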
