diff --git a/cmd/node/main.go b/cmd/node/main.go index 22d50979248..932c77934ab 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -13,6 +13,7 @@ import ( "os" "os/signal" "path/filepath" + "runtime/debug" "strconv" "strings" "sync" @@ -64,6 +65,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/process/factory/metachain" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory/shard" processSync "github.com/ElrondNetwork/elrond-go-sandbox/process/sync" + "github.com/ElrondNetwork/elrond-go-sandbox/process/track" "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" "github.com/ElrondNetwork/elrond-go-sandbox/storage" @@ -253,6 +255,10 @@ func main() { }, } + //TODO: The next line should be removed when the write in batches is done + // set the maximum allowed OS threads (not go routines) which can run in the same time (the default is 10000) + debug.SetMaxThreads(100000) + app.Action = func(c *cli.Context) error { return startNode(c, log) } @@ -769,6 +775,11 @@ func createShardNode( return nil, nil, nil, err } + blockTracker, err := track.NewShardBlockTracker(datapool, marshalizer, shardCoordinator, store) + if err != nil { + return nil, nil, nil, err + } + blockProcessor, err := block.NewShardProcessor( datapool, store, @@ -778,6 +789,7 @@ func createShardNode( accountsAdapter, shardCoordinator, forkDetector, + blockTracker, createTxRequestHandler(resolversFinder, factory.TransactionTopic, log), createRequestHandler(resolversFinder, factory.MiniBlocksTopic, log), ) @@ -800,6 +812,7 @@ func createShardNode( node.WithConsensusGroupSize(int(nodesConfig.ConsensusGroupSize)), node.WithSyncer(syncer), node.WithBlockProcessor(blockProcessor), + node.WithBlockTracker(blockTracker), node.WithGenesisTime(time.Unix(nodesConfig.StartTime, 0)), node.WithRounder(rounder), node.WithDataPool(datapool), @@ -818,6 +831,7 @@ func createShardNode( node.WithConsensusType(config.Consensus.Type), node.WithTxSingleSigner(txSingleSigner), node.WithActiveMetachain(nodesConfig.MetaChainActive), + node.WithTxStorageSize(config.TxStorage.Cache.Size), ) if err != nil { return nil, nil, nil, errors.New("error creating node: " + err.Error()) @@ -1049,6 +1063,11 @@ func createMetaNode( return nil, nil, nil, err } + blockTracker, err := track.NewMetaBlockTracker() + if err != nil { + return nil, nil, nil, err + } + shardsGenesisBlocks, err := generateGenesisHeadersForMetachainInit( nodesConfig, genesisConfig, @@ -1093,6 +1112,7 @@ func createMetaNode( node.WithConsensusGroupSize(int(nodesConfig.MetaChainConsensusGroupSize)), node.WithSyncer(syncer), node.WithBlockProcessor(metaProcessor), + node.WithBlockTracker(blockTracker), node.WithGenesisTime(time.Unix(nodesConfig.StartTime, 0)), node.WithRounder(rounder), node.WithMetaDataPool(metaDatapool), @@ -1110,6 +1130,7 @@ func createMetaNode( node.WithResolversFinder(resolversFinder), node.WithConsensusType(config.Consensus.Type), node.WithTxSingleSigner(txSingleSigner), + node.WithTxStorageSize(config.TxStorage.Cache.Size), ) if err != nil { return nil, nil, nil, errors.New("error creating meta-node: " + err.Error()) diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 9d93408f340..287963c90b5 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -22,11 +22,6 @@ type BlockProcessorMock struct { DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler } -// SetOnRequestTransaction mocks setting request 
transaction call back function -func (blProcMock *BlockProcessorMock) SetOnRequestTransaction(f func(destShardID uint32, txHash []byte)) { - blProcMock.SetOnRequestTransactionCalled(f) -} - // ProcessBlock mocks pocessing a block func (blProcMock *BlockProcessorMock) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) @@ -42,11 +37,6 @@ func (blProcMock *BlockProcessorMock) RevertAccountState() { blProcMock.RevertAccountStateCalled() } -// CreateGenesisBlock mocks the creation of a genesis block body -func (blProcMock *BlockProcessorMock) CreateGenesisBlock(balances map[string]*big.Int) (data.HeaderHandler, error) { - return blProcMock.CreateGenesisBlockCalled(balances) -} - // CreateTxBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorMock) CreateBlockBody(round int32, haveTime func() bool) (data.BodyHandler, error) { return blProcMock.CreateBlockCalled(round, haveTime) diff --git a/consensus/mock/blocksTrackerMock.go b/consensus/mock/blocksTrackerMock.go new file mode 100644 index 00000000000..5c516071da0 --- /dev/null +++ b/consensus/mock/blocksTrackerMock.go @@ -0,0 +1,33 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data" +) + +type BlocksTrackerMock struct { + UnnotarisedBlocksCalled func() []data.HeaderHandler + RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error + AddBlockCalled func(headerHandler data.HeaderHandler) + SetBlockBroadcastRoundCalled func(nonce uint64, round int32) + BlockBroadcastRoundCalled func(nonce uint64) int32 +} + +func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { + return btm.UnnotarisedBlocksCalled() +} + +func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { + return btm.RemoveNotarisedBlocksCalled(headerHandler) +} + +func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { + btm.AddBlockCalled(headerHandler) +} + +func (btm *BlocksTrackerMock) SetBlockBroadcastRound(nonce uint64, round int32) { + btm.SetBlockBroadcastRoundCalled(nonce, round) +} + +func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int32 { + return btm.BlockBroadcastRoundCalled(nonce) +} diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index c921b0b148b..b54d9a25ad9 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -37,6 +37,7 @@ func InitBlockProcessorMock() *BlockProcessorMock { blockProcessorMock.DecodeBlockHeaderCalled = func(dta []byte) data.HeaderHandler { return &block.Header{} } + return blockProcessorMock } diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index 6e0910cf7b4..6a8e33263af 100644 --- a/consensus/mock/sposWorkerMock.go +++ b/consensus/mock/sposWorkerMock.go @@ -15,7 +15,9 @@ type SposWorkerMock struct { ExtendCalled func(subroundId int) GetConsensusStateChangedChannelsCalled func() chan bool GetBroadcastBlockCalled func(data.BodyHandler, data.HeaderHandler) error + GetBroadcastHeaderCalled func(data.HeaderHandler) error ExecuteStoredMessagesCalled func() + BroadcastUnnotarisedBlocksCalled func() } func (sposWorkerMock *SposWorkerMock) AddReceivedMessageCall(messageType consensus.MessageType, @@ -50,3 +52,7 @@ func (sposWorkerMock *SposWorkerMock) BroadcastBlock(body data.BodyHandler, head func (sposWorkerMock 
*SposWorkerMock) ExecuteStoredMessages() { sposWorkerMock.ExecuteStoredMessagesCalled() } + +func (sposWorkerMock *SposWorkerMock) BroadcastUnnotarisedBlocks() { + sposWorkerMock.BroadcastUnnotarisedBlocksCalled() +} diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index ed5d74248ae..523c9a520a6 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -113,6 +113,7 @@ func (fct *factory) generateStartRoundSubround() error { processingThresholdPercent, getSubroundName, fct.worker.ExecuteStoredMessages, + fct.worker.BroadcastUnnotarisedBlocks, ) if err != nil { return err diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index b7a8beb9faf..d268c432bcb 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -2,6 +2,7 @@ package bls import ( "fmt" + "time" "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" "github.com/ElrondNetwork/elrond-go-sandbox/data" @@ -80,12 +81,16 @@ func (sr *subroundEndRound) doEndRoundJob() bool { sr.Header.SetPubKeysBitmap(bitmap) sr.Header.SetSignature(sig) + timeBefore := time.Now() // Commit the block (commits also the account state) err = sr.BlockProcessor().CommitBlock(sr.Blockchain(), sr.ConsensusState.Header, sr.ConsensusState.BlockBody) if err != nil { log.Error(err.Error()) return false } + timeAfter := time.Now() + + log.Info(fmt.Sprintf("time elapsed to commit block: %v sec\n", timeAfter.Sub(timeBefore).Seconds())) sr.SetStatus(SrEndRound, spos.SsFinished) @@ -95,7 +100,7 @@ func (sr *subroundEndRound) doEndRoundJob() bool { log.Error(err.Error()) } - log.Info(fmt.Sprintf("%sStep 3: BlockBody and Header has been commited and broadcasted \n", sr.SyncTimer().FormattedCurrentTime())) + log.Info(fmt.Sprintf("%sStep 3: BlockBody and Header has been committed and broadcast\n", sr.SyncTimer().FormattedCurrentTime())) msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", sr.Header.GetNonce()) log.Info(log.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "+")) diff --git a/consensus/spos/bn/bnSubroundsFactory.go b/consensus/spos/bn/bnSubroundsFactory.go index cf18eb8729e..727d857cc16 100644 --- a/consensus/spos/bn/bnSubroundsFactory.go +++ b/consensus/spos/bn/bnSubroundsFactory.go @@ -133,6 +133,7 @@ func (fct *factory) generateStartRoundSubround() error { processingThresholdPercent, getSubroundName, fct.worker.ExecuteStoredMessages, + fct.worker.BroadcastUnnotarisedBlocks, ) if err != nil { diff --git a/consensus/spos/bn/subroundEndRound.go b/consensus/spos/bn/subroundEndRound.go index 60115f01fe0..0377ac89f51 100644 --- a/consensus/spos/bn/subroundEndRound.go +++ b/consensus/spos/bn/subroundEndRound.go @@ -2,6 +2,7 @@ package bn import ( "fmt" + "time" "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" "github.com/ElrondNetwork/elrond-go-sandbox/data" @@ -45,11 +46,9 @@ func checkNewSubroundEndRoundParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { return spos.ErrNilConsensusState } - if broadcastBlock == nil { return spos.ErrNilBroadcastBlockFunction } @@ -77,12 +76,16 @@ func (sr *subroundEndRound) doEndRoundJob() bool { sr.Header.SetSignature(sig) + timeBefore := time.Now() // Commit the block (commits also the account state) err = sr.BlockProcessor().CommitBlock(sr.Blockchain(), sr.ConsensusState.Header, sr.ConsensusState.BlockBody) if err != nil { log.Error(err.Error()) return false 
} + timeAfter := time.Now() + + log.Info(fmt.Sprintf("time elapsed to commit block: %v sec\n", timeAfter.Sub(timeBefore).Seconds())) sr.SetStatus(SrEndRound, spos.SsFinished) @@ -92,7 +95,7 @@ func (sr *subroundEndRound) doEndRoundJob() bool { log.Error(err.Error()) } - log.Info(fmt.Sprintf("%sStep 6: TxBlockBody and Header has been commited and broadcasted \n", sr.SyncTimer().FormattedCurrentTime())) + log.Info(fmt.Sprintf("%sStep 6: TxBlockBody and Header has been committed and broadcast\n", sr.SyncTimer().FormattedCurrentTime())) actionMsg := "synchronized" if sr.IsSelfLeaderInCurrentRound() { diff --git a/consensus/spos/commonSubround/base_test.go b/consensus/spos/commonSubround/base_test.go index 08493c9bd50..7bb3f3feb1f 100644 --- a/consensus/spos/commonSubround/base_test.go +++ b/consensus/spos/commonSubround/base_test.go @@ -4,7 +4,6 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-sandbox/consensus/mock" - "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" ) @@ -116,3 +115,6 @@ func getSubroundName(subroundId int) string { // executeStoredMessages tries to execute all the messages received which are valid for execution func executeStoredMessages() { } + +func broadcastUnnotarisedBlocks() { +} diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 69cfa3b11fd..1d4002fe002 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -5,10 +5,9 @@ import ( "fmt" "time" + "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" "github.com/ElrondNetwork/elrond-go-sandbox/core" "github.com/ElrondNetwork/elrond-go-sandbox/core/logger" - - "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" ) var log = logger.DefaultLogger() @@ -19,6 +18,7 @@ type SubroundStartRound struct { processingThresholdPercentage int getSubroundName func(subroundId int) string executeStoredMessages func() + broadcastUnnotarisedBlocks func() } // NewSubroundStartRound creates a SubroundStartRound object @@ -28,9 +28,11 @@ func NewSubroundStartRound( processingThresholdPercentage int, getSubroundName func(subroundId int) string, executeStoredMessages func(), + broadcastUnnotarisedBlocks func(), ) (*SubroundStartRound, error) { err := checkNewSubroundStartRoundParams( baseSubround, + broadcastUnnotarisedBlocks, ) if err != nil { return nil, err @@ -41,6 +43,7 @@ func NewSubroundStartRound( processingThresholdPercentage, getSubroundName, executeStoredMessages, + broadcastUnnotarisedBlocks, } srStartRound.Job = srStartRound.doStartRoundJob srStartRound.Check = srStartRound.doStartRoundConsensusCheck @@ -51,14 +54,17 @@ func NewSubroundStartRound( func checkNewSubroundStartRoundParams( baseSubround *spos.Subround, + broadcastUnnotarisedBlocks func(), ) error { if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { return spos.ErrNilConsensusState } + if broadcastUnnotarisedBlocks == nil { + return spos.ErrNilBroadcastUnnotarisedBlocks + } err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) @@ -156,6 +162,10 @@ func (sr *SubroundStartRound) initCurrentRound() bool { sr.SetStatus(sr.Current(), spos.SsFinished) + if leader == sr.SelfPubKey() { + sr.broadcastUnnotarisedBlocks() + } + // execute stored messages which were received in this new round but before this initialisation go sr.executeStoredMessages() diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go 
b/consensus/spos/commonSubround/subroundStartRound_test.go index 773645d0581..91eed22af7f 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -19,6 +19,7 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (*commonSubround.S processingThresholdPercent, getSubroundName, executeStoredMessages, + broadcastUnnotarisedBlocks, ) return startRound, err @@ -49,6 +50,7 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) *c processingThresholdPercent, getSubroundName, executeStoredMessages, + broadcastUnnotarisedBlocks, ) return srStartRound @@ -68,6 +70,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilSubroundShouldFail(t *testin processingThresholdPercent, getSubroundName, executeStoredMessages, + broadcastUnnotarisedBlocks, ) assert.Nil(t, srStartRound) @@ -122,6 +125,28 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * assert.Equal(t, spos.ErrNilConsensusState, err) } +func TestSubroundStartRound_NewSubroundStartRoundNilBroadcastUnnotarisedBlocksFunctionShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + srStartRound, err := commonSubround.NewSubroundStartRound( + sr, + extend, + processingThresholdPercent, + getSubroundName, + executeStoredMessages, + nil, + ) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilBroadcastUnnotarisedBlocks, err) +} + func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerShouldFail(t *testing.T) { t.Parallel() diff --git a/consensus/spos/constants.go b/consensus/spos/constants.go index aeb8bf850e1..f7e15fe1c52 100644 --- a/consensus/spos/constants.go +++ b/consensus/spos/constants.go @@ -2,3 +2,7 @@ package spos // maxThresholdPercent specifies the max allocated time percent for doing Job as a percentage of the total time of one round const maxThresholdPercent = 75 + +// MaxRoundsGap defines the maximum expected gap in terms of rounds, between metachain and shardchain, after which +// a block committed and broadcast from shardchain would be visible as notarized in metachain +const MaxRoundsGap = 3 diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index 9ecc3fd196a..145c87fb2bb 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -61,6 +61,9 @@ var ErrNilMarshalizer = errors.New("marshalizer is nil") // ErrNilBlockProcessor is raised when a valid block processor is expected but nil used var ErrNilBlockProcessor = errors.New("block processor is nil") +// ErrNilBlockTracker is raised when a valid block tracker is expected but nil used +var ErrNilBlockTracker = errors.New("block tracker is nil") + // ErrNilBlootstraper is raised when a valid block processor is expected but nil used var ErrNilBlootstraper = errors.New("boostraper is nil") @@ -133,8 +136,14 @@ var ErrNilBlsSingleSigner = errors.New("BLS single signer should not be nil") // ErrNilHeader is raised when an expected header is nil var ErrNilHeader = errors.New("header is nil") -// ErrNilBroadCastBlock is raised when a valid broadcastBlock function is expected but nil used -var ErrNilBroadCastBlock = errors.New("broadCastBlock is nil") +// ErrNilBroadcastBlock is raised when a valid broadcastBlock function is expected but nil used +var ErrNilBroadcastBlock = errors.New("broadcastBlock is nil") + +// ErrNilBroadcastHeader is raised 
when a valid broadcastHeader function is expected but nil used +var ErrNilBroadcastHeader = errors.New("broadcastHeader is nil") + +// ErrNilBroadcastUnnotarisedBlocks is raised when a valid broadcastUnnotarisedBlocks function is expected but nil used +var ErrNilBroadcastUnnotarisedBlocks = errors.New("broadcastUnnotarisedBlocks is nil") // ErrNilSendMessage is raised when a valid sendMessage function is expected but nil used var ErrNilSendMessage = errors.New("sendMessage is nil") diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go index 081f4eb4c20..3da340970f5 100644 --- a/consensus/spos/export_test.go +++ b/consensus/spos/export_test.go @@ -20,6 +20,14 @@ func (wrk *Worker) SetBlockProcessor(blockProcessor process.BlockProcessor) { wrk.blockProcessor = blockProcessor } +func (wrk *Worker) BlockTracker() process.BlocksTracker { + return wrk.blockTracker +} + +func (wrk *Worker) SetBlockTracker(blockTracker process.BlocksTracker) { + wrk.blockTracker = blockTracker +} + func (wrk *Worker) Bootstraper() process.Bootstrapper { return wrk.bootstraper } @@ -36,6 +44,14 @@ func (wrk *Worker) SetConsensusState(consensusState *ConsensusState) { wrk.consensusState = consensusState } +func (wrk *Worker) ForkDetector() process.ForkDetector { + return wrk.forkDetector +} + +func (wrk *Worker) SetForkDetector(forkDetector process.ForkDetector) { + wrk.forkDetector = forkDetector +} + func (wrk *Worker) KeyGenerator() crypto.KeyGenerator { return wrk.keyGenerator } @@ -128,6 +144,10 @@ func (wrk *Worker) SetSendMessage(sendMessage func(consensus *consensus.Message) wrk.sendMessage = sendMessage } -func (wrk *Worker) SetBroadCastBlock(broadcastBlock func(data.BodyHandler, data.HeaderHandler) error) { +func (wrk *Worker) SetBroadcastBlock(broadcastBlock func(data.BodyHandler, data.HeaderHandler) error) { wrk.broadcastBlock = broadcastBlock } + +func (wrk *Worker) SetBroadcastHeader(broadcastHeader func(data.HeaderHandler) error) { + wrk.broadcastHeader = broadcastHeader +} diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 4e4c8c56109..ce8201a8a5a 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -83,4 +83,6 @@ type WorkerHandler interface { BroadcastBlock(body data.BodyHandler, header data.HeaderHandler) error //ExecuteStoredMessages tries to execute all the messages received which are valid for execution ExecuteStoredMessages() + //BroadcastUnnotarisedBlocks broadcasts all blocks which are not notarised yet + BroadcastUnnotarisedBlocks() } diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 4a1326e5d45..d4dfe8228e7 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -12,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/ntp" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" @@ -21,6 +22,7 @@ import ( type Worker struct { consensusService ConsensusService blockProcessor process.BlockProcessor + blockTracker process.BlocksTracker bootstraper process.Bootstrapper consensusState *ConsensusState forkDetector process.ForkDetector @@ -30,6 +32,7 @@ type Worker struct { rounder consensus.Rounder shardCoordinator sharding.Coordinator singleSigner crypto.SingleSigner + syncTimer ntp.SyncTimer receivedMessages 
map[consensus.MessageType][]*consensus.Message receivedMessagesCalls map[consensus.MessageType]func(*consensus.Message) bool @@ -37,8 +40,9 @@ type Worker struct { executeMessageChannel chan *consensus.Message consensusStateChangedChannel chan bool - broadcastBlock func(data.BodyHandler, data.HeaderHandler) error - sendMessage func(consensus *consensus.Message) + broadcastBlock func(data.BodyHandler, data.HeaderHandler) error + broadcastHeader func(data.HeaderHandler) error + sendMessage func(consensus *consensus.Message) mutReceivedMessages sync.RWMutex mutReceivedMessagesCalls sync.RWMutex @@ -48,6 +52,7 @@ type Worker struct { func NewWorker( consensusService ConsensusService, blockProcessor process.BlockProcessor, + blockTracker process.BlocksTracker, bootstraper process.Bootstrapper, consensusState *ConsensusState, forkDetector process.ForkDetector, @@ -57,12 +62,15 @@ func NewWorker( rounder consensus.Rounder, shardCoordinator sharding.Coordinator, singleSigner crypto.SingleSigner, + syncTimer ntp.SyncTimer, broadcastBlock func(data.BodyHandler, data.HeaderHandler) error, + broadcastHeader func(data.HeaderHandler) error, sendMessage func(consensus *consensus.Message), ) (*Worker, error) { err := checkNewWorkerParams( consensusService, blockProcessor, + blockTracker, bootstraper, consensusState, forkDetector, @@ -72,7 +80,9 @@ func NewWorker( rounder, shardCoordinator, singleSigner, + syncTimer, broadcastBlock, + broadcastHeader, sendMessage, ) if err != nil { @@ -82,6 +92,7 @@ func NewWorker( wrk := Worker{ consensusService: consensusService, blockProcessor: blockProcessor, + blockTracker: blockTracker, bootstraper: bootstraper, consensusState: consensusState, forkDetector: forkDetector, @@ -91,7 +102,9 @@ func NewWorker( rounder: rounder, shardCoordinator: shardCoordinator, singleSigner: singleSigner, + syncTimer: syncTimer, broadcastBlock: broadcastBlock, + broadcastHeader: broadcastHeader, sendMessage: sendMessage, } @@ -109,6 +122,7 @@ func NewWorker( func checkNewWorkerParams( consensusService ConsensusService, blockProcessor process.BlockProcessor, + blockTracker process.BlocksTracker, bootstraper process.Bootstrapper, consensusState *ConsensusState, forkDetector process.ForkDetector, @@ -118,7 +132,9 @@ func checkNewWorkerParams( rounder consensus.Rounder, shardCoordinator sharding.Coordinator, singleSigner crypto.SingleSigner, + syncTimer ntp.SyncTimer, broadcastBlock func(data.BodyHandler, data.HeaderHandler) error, + broadcastHeader func(data.HeaderHandler) error, sendMessage func(consensus *consensus.Message), ) error { if consensusService == nil { @@ -127,6 +143,9 @@ func checkNewWorkerParams( if blockProcessor == nil { return ErrNilBlockProcessor } + if blockTracker == nil { + return ErrNilBlockTracker + } if bootstraper == nil { return ErrNilBlootstraper } @@ -154,8 +173,14 @@ func checkNewWorkerParams( if singleSigner == nil { return ErrNilSingleSigner } + if syncTimer == nil { + return ErrNilSyncTimer + } if broadcastBlock == nil { - return ErrNilBroadCastBlock + return ErrNilBroadcastBlock + } + if broadcastHeader == nil { + return ErrNilBroadcastHeader } if sendMessage == nil { return ErrNilSendMessage @@ -433,6 +458,33 @@ func (wrk *Worker) BroadcastBlock(body data.BodyHandler, header data.HeaderHandl return wrk.broadcastBlock(body, header) } +//BroadcastUnnotarisedBlocks broadcasts all blocks which are not notarised yet +func (wrk *Worker) BroadcastUnnotarisedBlocks() { + headers := wrk.blockTracker.UnnotarisedBlocks() + for _, header := range headers { + if 
header.GetNonce() > wrk.forkDetector.GetHighestFinalBlockNonce() { + continue + } + + brodcastRound := wrk.blockTracker.BlockBroadcastRound(header.GetNonce()) + if brodcastRound >= wrk.consensusState.RoundIndex-MaxRoundsGap { + continue + } + + err := wrk.broadcastHeader(header) + if err != nil { + log.Error(err.Error()) + continue + } + + wrk.blockTracker.SetBlockBroadcastRound(header.GetNonce(), wrk.consensusState.RoundIndex) + + log.Info(fmt.Sprintf("%sStep 0: Unnotarised header with nonce %d has been broadcast to metachain\n", + wrk.syncTimer.FormattedCurrentTime(), + header.GetNonce())) + } +} + //ExecuteStoredMessages tries to execute all the messages received which are valid for execution func (wrk *Worker) ExecuteStoredMessages() { wrk.mutReceivedMessages.Lock() diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 44e4b8a2201..325c0ef0538 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -30,6 +30,11 @@ func broadcastBlock(txBlockBody data.BodyHandler, header data.HeaderHandler) err return nil } +func broadcastHeader(header data.HeaderHandler) error { + fmt.Println(header) + return nil +} + func initWorker() *spos.Worker { blockProcessor := &mock.BlockProcessorMock{ DecodeBlockHeaderCalled: func(dta []byte) data.HeaderHandler { @@ -38,6 +43,7 @@ func initWorker() *spos.Worker { RevertAccountStateCalled: func() { }, } + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -56,12 +62,14 @@ func initWorker() *spos.Worker { return nil }, } + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() sposWorker, _ := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -71,7 +79,9 @@ func initWorker() *spos.Worker { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage) return sposWorker @@ -93,6 +103,7 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -102,9 +113,11 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} wrk, err := spos.NewWorker(nil, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -114,7 +127,9 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -122,8 +137,10 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { assert.Equal(t, spos.ErrNilConsensusService, err) } -func TestWorker_NewWorkerBlockprocessorNilShouldFail(t *testing.T) { +func TestWorker_NewWorkerBlockProcessorNilShouldFail(t *testing.T) { t.Parallel() + + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -133,10 +150,12 @@ func TestWorker_NewWorkerBlockprocessorNilShouldFail(t *testing.T) { rounderMock := 
initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker(bnService, nil, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -146,7 +165,9 @@ func TestWorker_NewWorkerBlockprocessorNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -154,9 +175,49 @@ func TestWorker_NewWorkerBlockprocessorNilShouldFail(t *testing.T) { assert.Equal(t, spos.ErrNilBlockProcessor, err) } +func TestWorker_NewWorkerBlockTrackerNilShouldFail(t *testing.T) { + t.Parallel() + + blockProcessor := &mock.BlockProcessorMock{} + bootstraperMock := &mock.BootstraperMock{} + consensusState := initConsensusState() + forkDetectorMock := &mock.ForkDetectorMock{} + keyGeneratorMock := &mock.KeyGenMock{} + marshalizerMock := mock.MarshalizerMock{} + privateKeyMock := &mock.PrivateKeyMock{} + rounderMock := initRounderMock() + shardCoordinatorMock := mock.ShardCoordinatorMock{} + singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} + bnService, _ := bn.NewConsensusService() + + wrk, err := spos.NewWorker(bnService, + blockProcessor, + nil, + bootstraperMock, + consensusState, + forkDetectorMock, + keyGeneratorMock, + marshalizerMock, + privateKeyMock, + rounderMock, + shardCoordinatorMock, + singleSignerMock, + syncTimerMock, + broadcastBlock, + broadcastHeader, + sendMessage, + ) + + assert.Nil(t, wrk) + assert.Equal(t, spos.ErrNilBlockTracker, err) +} + func TestWorker_NewWorkerBoostraperNilShouldFail(t *testing.T) { t.Parallel() + blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} keyGeneratorMock := &mock.KeyGenMock{} @@ -165,11 +226,13 @@ func TestWorker_NewWorkerBoostraperNilShouldFail(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, nil, consensusState, forkDetectorMock, @@ -179,7 +242,9 @@ func TestWorker_NewWorkerBoostraperNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -190,6 +255,7 @@ func TestWorker_NewWorkerBoostraperNilShouldFail(t *testing.T) { func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} forkDetectorMock := &mock.ForkDetectorMock{} keyGeneratorMock := &mock.KeyGenMock{} @@ -198,11 +264,13 @@ func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, nil, forkDetectorMock, @@ -212,7 +280,9 @@ func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, 
broadcastBlock, + broadcastHeader, sendMessage, ) @@ -223,6 +293,7 @@ func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() keyGeneratorMock := &mock.KeyGenMock{} @@ -231,11 +302,13 @@ func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, nil, @@ -245,7 +318,9 @@ func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -256,6 +331,7 @@ func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -264,11 +340,13 @@ func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -278,7 +356,9 @@ func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -289,6 +369,7 @@ func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -297,11 +378,13 @@ func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -311,7 +394,9 @@ func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -322,6 +407,7 @@ func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerPrivateKeyNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -330,11 +416,13 @@ func TestWorker_NewWorkerPrivateKeyNilShouldFail(t 
*testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -344,7 +432,9 @@ func TestWorker_NewWorkerPrivateKeyNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -355,6 +445,7 @@ func TestWorker_NewWorkerPrivateKeyNilShouldFail(t *testing.T) { func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -363,11 +454,13 @@ func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { privateKeyMock := &mock.PrivateKeyMock{} shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -377,7 +470,9 @@ func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { nil, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -388,6 +483,7 @@ func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -396,11 +492,13 @@ func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { privateKeyMock := &mock.PrivateKeyMock{} rounderMock := initRounderMock() singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -410,7 +508,9 @@ func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { rounderMock, nil, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -421,6 +521,7 @@ func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -429,11 +530,13 @@ func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { privateKeyMock := &mock.PrivateKeyMock{} rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -443,7 +546,9 @@ func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, nil, + syncTimerMock, broadcastBlock, + broadcastHeader, 
sendMessage, ) @@ -451,9 +556,87 @@ func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { assert.Equal(t, spos.ErrNilSingleSigner, err) } +func TestWorker_NewWorkerSyncTimerNilShouldFail(t *testing.T) { + t.Parallel() + blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} + bootstraperMock := &mock.BootstraperMock{} + consensusState := initConsensusState() + forkDetectorMock := &mock.ForkDetectorMock{} + keyGeneratorMock := &mock.KeyGenMock{} + marshalizerMock := mock.MarshalizerMock{} + privateKeyMock := &mock.PrivateKeyMock{} + rounderMock := initRounderMock() + shardCoordinatorMock := mock.ShardCoordinatorMock{} + singleSignerMock := &mock.SingleSignerMock{} + bnService, _ := bn.NewConsensusService() + + wrk, err := spos.NewWorker( + bnService, + blockProcessor, + blockTrackerMock, + bootstraperMock, + consensusState, + forkDetectorMock, + keyGeneratorMock, + marshalizerMock, + privateKeyMock, + rounderMock, + shardCoordinatorMock, + singleSignerMock, + nil, + broadcastBlock, + broadcastHeader, + sendMessage, + ) + + assert.Nil(t, wrk) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + func TestWorker_NewWorkerBroadcastBlockNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} + bootstraperMock := &mock.BootstraperMock{} + consensusState := initConsensusState() + forkDetectorMock := &mock.ForkDetectorMock{} + keyGeneratorMock := &mock.KeyGenMock{} + marshalizerMock := mock.MarshalizerMock{} + privateKeyMock := &mock.PrivateKeyMock{} + rounderMock := initRounderMock() + shardCoordinatorMock := mock.ShardCoordinatorMock{} + singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} + bnService, _ := bn.NewConsensusService() + + wrk, err := spos.NewWorker( + bnService, + blockProcessor, + blockTrackerMock, + bootstraperMock, + consensusState, + forkDetectorMock, + keyGeneratorMock, + marshalizerMock, + privateKeyMock, + rounderMock, + shardCoordinatorMock, + singleSignerMock, + syncTimerMock, + nil, + broadcastHeader, + sendMessage, + ) + + assert.Nil(t, wrk) + assert.Equal(t, spos.ErrNilBroadcastBlock, err) +} + +func TestWorker_NewWorkerBroadcastHeaderNilShouldFail(t *testing.T) { + t.Parallel() + blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -463,11 +646,13 @@ func TestWorker_NewWorkerBroadcastBlockNilShouldFail(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -477,17 +662,20 @@ func TestWorker_NewWorkerBroadcastBlockNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, + broadcastBlock, nil, sendMessage, ) assert.Nil(t, wrk) - assert.Equal(t, spos.ErrNilBroadCastBlock, err) + assert.Equal(t, spos.ErrNilBroadcastHeader, err) } func TestWorker_NewWorkerSendMessageNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := 
&mock.ForkDetectorMock{} @@ -497,11 +685,13 @@ func TestWorker_NewWorkerSendMessageNilShouldFail(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -511,7 +701,9 @@ func TestWorker_NewWorkerSendMessageNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, nil, ) @@ -522,6 +714,7 @@ func TestWorker_NewWorkerSendMessageNilShouldFail(t *testing.T) { func TestWorker_NewWorkerShouldWork(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} + blockTrackerMock := &mock.BlocksTrackerMock{} bootstraperMock := &mock.BootstraperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -531,11 +724,13 @@ func TestWorker_NewWorkerShouldWork(t *testing.T) { rounderMock := initRounderMock() shardCoordinatorMock := mock.ShardCoordinatorMock{} singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} bnService, _ := bn.NewConsensusService() wrk, err := spos.NewWorker( bnService, blockProcessor, + blockTrackerMock, bootstraperMock, consensusState, forkDetectorMock, @@ -545,7 +740,9 @@ func TestWorker_NewWorkerShouldWork(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, + syncTimerMock, broadcastBlock, + broadcastHeader, sendMessage, ) @@ -1328,7 +1525,7 @@ func TestWorker_ExtendShouldReturnWhenCreateEmptyBlockFail(t *testing.T) { t.Parallel() wrk := *initWorker() executed := false - wrk.SetBroadCastBlock(func(data.BodyHandler, data.HeaderHandler) error { + wrk.SetBroadcastBlock(func(data.BodyHandler, data.HeaderHandler) error { executed = true return nil }) @@ -1412,3 +1609,174 @@ func TestWorker_ExecuteStoredMessagesShouldWork(t *testing.T) { rcvMsg = wrk.ReceivedMessages() assert.Equal(t, 0, len(rcvMsg[msgType])) } + +func TestWorker_BroadcastUnnotarisedBlocksShouldNotBroadcastWhenBlockIsNotFinal(t *testing.T) { + t.Parallel() + + headerHasBeenBroadcast := false + broadcastInRound := int32(0) + + wrk := *initWorker() + header := &block.Header{Nonce: 3} + roundIndex := int32(10) + blockTracker := &mock.BlocksTrackerMock{ + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + headers := make([]data.HeaderHandler, 0) + headers = append(headers, header) + return headers + }, + BlockBroadcastRoundCalled: func(nonce uint64) int32 { + return broadcastInRound + }, + SetBlockBroadcastRoundCalled: func(nonce uint64, round int32) { + broadcastInRound = round + }, + } + + forkDetector := &mock.ForkDetectorMock{ + GetHighestFinalBlockNonceCalled: func() uint64 { + return header.Nonce - 1 + }, + } + + wrk.ConsensusState().RoundIndex = int32(roundIndex) + wrk.SetBlockTracker(blockTracker) + wrk.SetForkDetector(forkDetector) + wrk.SetBroadcastHeader(func(headerHandler data.HeaderHandler) error { + headerHasBeenBroadcast = true + return nil + }) + + wrk.BroadcastUnnotarisedBlocks() + assert.False(t, headerHasBeenBroadcast) + assert.Equal(t, int32(0), wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) +} + +func TestWorker_BroadcastUnnotarisedBlocksShouldNotBroadcastWhenMaxRoundGapIsNotAchieved(t *testing.T) { + t.Parallel() + + headerHasBeenBroadcast := false + broadcastInRound := int32(0) + + wrk := *initWorker() + header := 
&block.Header{Nonce: 3} + roundIndex := int32(10) + blockTracker := &mock.BlocksTrackerMock{ + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + headers := make([]data.HeaderHandler, 0) + headers = append(headers, header) + return headers + }, + BlockBroadcastRoundCalled: func(nonce uint64) int32 { + return broadcastInRound + }, + SetBlockBroadcastRoundCalled: func(nonce uint64, round int32) { + broadcastInRound = round + }, + } + + forkDetector := &mock.ForkDetectorMock{ + GetHighestFinalBlockNonceCalled: func() uint64 { + return header.Nonce + }, + } + + wrk.ConsensusState().RoundIndex = int32(roundIndex) + wrk.SetBlockTracker(blockTracker) + wrk.SetForkDetector(forkDetector) + wrk.SetBroadcastHeader(func(headerHandler data.HeaderHandler) error { + headerHasBeenBroadcast = true + return nil + }) + wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int32(roundIndex-spos.MaxRoundsGap)) + + wrk.BroadcastUnnotarisedBlocks() + assert.False(t, headerHasBeenBroadcast) + assert.Equal(t, int32(roundIndex-spos.MaxRoundsGap), wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) +} + +func TestWorker_BroadcastUnnotarisedBlocksShouldErrWhenBroadcastHeaderFails(t *testing.T) { + t.Parallel() + + broadcastInRound := int32(0) + + var err error + wrk := *initWorker() + header := &block.Header{Nonce: 3} + roundIndex := int32(10) + blockTracker := &mock.BlocksTrackerMock{ + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + headers := make([]data.HeaderHandler, 0) + headers = append(headers, header) + return headers + }, + BlockBroadcastRoundCalled: func(nonce uint64) int32 { + return broadcastInRound + }, + SetBlockBroadcastRoundCalled: func(nonce uint64, round int32) { + broadcastInRound = round + }, + } + + forkDetector := &mock.ForkDetectorMock{ + GetHighestFinalBlockNonceCalled: func() uint64 { + return header.Nonce + }, + } + + wrk.ConsensusState().RoundIndex = int32(roundIndex) + wrk.SetBlockTracker(blockTracker) + wrk.SetForkDetector(forkDetector) + wrk.SetBroadcastHeader(func(headerHandler data.HeaderHandler) error { + err = errors.New("broadcast header error") + return err + }) + wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int32(roundIndex-spos.MaxRoundsGap-1)) + + wrk.BroadcastUnnotarisedBlocks() + assert.NotNil(t, err) + assert.Equal(t, int32(roundIndex-spos.MaxRoundsGap-1), wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) +} + +func TestWorker_BroadcastUnnotarisedBlocksShouldBroadcast(t *testing.T) { + t.Parallel() + + headerHasBeenBroadcast := false + broadcastInRound := int32(0) + + wrk := *initWorker() + header := &block.Header{Nonce: 3} + roundIndex := int32(10) + blockTracker := &mock.BlocksTrackerMock{ + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + headers := make([]data.HeaderHandler, 0) + headers = append(headers, header) + return headers + }, + BlockBroadcastRoundCalled: func(nonce uint64) int32 { + return broadcastInRound + }, + SetBlockBroadcastRoundCalled: func(nonce uint64, round int32) { + broadcastInRound = round + }, + } + + forkDetector := &mock.ForkDetectorMock{ + GetHighestFinalBlockNonceCalled: func() uint64 { + return header.Nonce + }, + } + + wrk.ConsensusState().RoundIndex = int32(roundIndex) + wrk.SetBlockTracker(blockTracker) + wrk.SetForkDetector(forkDetector) + wrk.SetBroadcastHeader(func(headerHandler data.HeaderHandler) error { + headerHasBeenBroadcast = true + return nil + }) + wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int32(roundIndex-spos.MaxRoundsGap-1)) + + wrk.BroadcastUnnotarisedBlocks() + 
assert.True(t, headerHasBeenBroadcast) + assert.Equal(t, roundIndex, wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) +} diff --git a/core/converters.go b/core/converters.go index a20101b488d..3c18e183c4a 100644 --- a/core/converters.go +++ b/core/converters.go @@ -1,6 +1,8 @@ package core import ( + "encoding/base64" + "encoding/hex" "fmt" ) @@ -17,3 +19,19 @@ func ConvertBytes(bytes uint64) string { } return fmt.Sprintf("%.2f GB", float64(bytes)/1024.0/1024.0/1024.0) } + +// ToB64 encodes the given buff to base64 +func ToB64(buff []byte) string { + if buff == nil { + return "" + } + return base64.StdEncoding.EncodeToString(buff) +} + +// ToHex encodes the given buff to hex +func ToHex(buff []byte) string { + if buff == nil { + return "" + } + return "0x" + hex.EncodeToString(buff) +} diff --git a/core/converters_test.go b/core/converters_test.go new file mode 100644 index 00000000000..ee19fb7bbfa --- /dev/null +++ b/core/converters_test.go @@ -0,0 +1,32 @@ +package core_test + +import ( + "encoding/base64" + "encoding/hex" + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/core" + "github.com/stretchr/testify/assert" +) + +func TestToB64ShouldReturnNil(t *testing.T) { + val := core.ToB64(nil) + assert.Equal(t, "", val) +} + +func TestToB64ShouldWork(t *testing.T) { + buff := []byte("test") + val := core.ToB64(buff) + assert.Equal(t, base64.StdEncoding.EncodeToString(buff), val) +} + +func TestToHexShouldReturnNil(t *testing.T) { + val := core.ToHex(nil) + assert.Equal(t, "", val) +} + +func TestToHexShouldWork(t *testing.T) { + buff := []byte("test") + val := core.ToHex(buff) + assert.Equal(t, "0x"+hex.EncodeToString(buff), val) +} diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 03fec72fb6a..e39b612906a 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -269,6 +269,11 @@ func createConsensusOnlyNode( return nil } blockProcessor.Marshalizer = testMarshalizer + blockTracker := &mock.BlocksTrackerMock{ + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + } blockChain := createTestBlockChain() header := &dataBlock.Header{ @@ -353,6 +358,7 @@ func createConsensusOnlyNode( node.WithDataStore(createTestStore()), node.WithResolversFinder(resolverFinder), node.WithConsensusType(consensusType), + node.WithBlockTracker(blockTracker), ) if err != nil { diff --git a/integrationTests/mock/blocksTrackerMock.go b/integrationTests/mock/blocksTrackerMock.go new file mode 100644 index 00000000000..5c516071da0 --- /dev/null +++ b/integrationTests/mock/blocksTrackerMock.go @@ -0,0 +1,33 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data" +) + +type BlocksTrackerMock struct { + UnnotarisedBlocksCalled func() []data.HeaderHandler + RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error + AddBlockCalled func(headerHandler data.HeaderHandler) + SetBlockBroadcastRoundCalled func(nonce uint64, round int32) + BlockBroadcastRoundCalled func(nonce uint64) int32 +} + +func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { + return btm.UnnotarisedBlocksCalled() +} + +func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { + return btm.RemoveNotarisedBlocksCalled(headerHandler) +} + +func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { + btm.AddBlockCalled(headerHandler) +} + +func (btm *BlocksTrackerMock) 
SetBlockBroadcastRound(nonce uint64, round int32) { + btm.SetBlockBroadcastRoundCalled(nonce, round) +} + +func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int32 { + return btm.BlockBroadcastRoundCalled(nonce) +} diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 1ae68c5bb39..f39395b9083 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -91,6 +91,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { fmt.Println("Step 6. Proposer disseminates header, block body and miniblocks...") proposerNode.node.BroadcastShardBlock(blockBody, blockHeader) + proposerNode.node.BroadcastShardHeader(blockHeader) fmt.Println("Delaying for disseminating miniblocks and header...") time.Sleep(time.Second * 5) fmt.Println(makeDisplayTable(nodes)) @@ -151,6 +152,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { body, header := proposeBlock(t, receiverProposer) receiverProposer.node.BroadcastShardBlock(body, header) + receiverProposer.node.BroadcastShardHeader(header) } fmt.Println("Delaying for disseminating miniblocks and headers...") time.Sleep(time.Second * 5) diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index e59c17ae7c7..17734ab890b 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -255,6 +255,13 @@ func createNetNode( return 0 }, }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + }, func(destShardID uint32, txHashes [][]byte) { resolver, err := resolversFinder.CrossShardResolver(factory.TransactionTopic, destShardID) if err != nil { diff --git a/integrationTests/multiShard/metablock/blocksDissemination_test.go b/integrationTests/multiShard/metablock/blocksDissemination_test.go index e1ecc0507cb..aec8e1eca79 100644 --- a/integrationTests/multiShard/metablock/blocksDissemination_test.go +++ b/integrationTests/multiShard/metablock/blocksDissemination_test.go @@ -55,6 +55,8 @@ func TestHeadersAreReceivedByMetachainAndShard(t *testing.T) { body, hdr := generateHeaderAndBody(senderShard) err := nodes[0].node.BroadcastShardBlock(body, hdr) assert.Nil(t, err) + err = nodes[0].node.BroadcastShardHeader(hdr) + assert.Nil(t, err) for i := 0; i < 5; i++ { fmt.Println(makeDisplayTable(nodes)) diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index 475861e3966..62c2add1639 100644 --- a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -321,6 +321,13 @@ func createShardNetNode( return 0 }, }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + }, func(shardId uint32, txHash [][]byte) { }, diff --git a/integrationTests/singleShard/transaction/testInitializer.go b/integrationTests/singleShard/transaction/testInitializer.go index f391d97581f..8441c3d94a7 100644 --- a/integrationTests/singleShard/transaction/testInitializer.go +++ b/integrationTests/singleShard/transaction/testInitializer.go @@ -230,6 +230,7 
@@ func createNetNode( node.WithResolversFinder(resolversFinder), node.WithDataStore(store), node.WithTxSingleSigner(singleSigner), + node.WithTxStorageSize(100000), ) return n, messenger, sk, resolversFinder diff --git a/integrationTests/state/stateTrie_test.go b/integrationTests/state/stateTrie_test.go index 5bdefcac4d3..422eb67f8cb 100644 --- a/integrationTests/state/stateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -307,7 +307,7 @@ func TestAccountsDB_CommitAccountDataShouldWork(t *testing.T) { _, err = adb.Commit() assert.Nil(t, err) hrCommit := base64.StdEncoding.EncodeToString(adb.RootHash()) - fmt.Printf("State root - commited: %v\n", hrCommit) + fmt.Printf("State root - committed: %v\n", hrCommit) //commit hash == account with balance assert.Equal(t, hrCommit, hrWithBalance) diff --git a/node/defineOptions.go b/node/defineOptions.go index efa9982f0ac..21ffa56ab97 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -180,13 +180,13 @@ func WithConsensusGroupSize(consensusGroupSize int) Option { } } -// WithSyncer sets up the syncer option for the Node +// WithSyncer sets up the syncTimer option for the Node func WithSyncer(syncer ntp.SyncTimer) Option { return func(n *Node) error { if syncer == nil { return ErrNilSyncTimer } - n.syncer = syncer + n.syncTimer = syncer return nil } } @@ -213,6 +213,17 @@ func WithBlockProcessor(blockProcessor process.BlockProcessor) Option { } } +// WithBlockTracker sets up the block tracker option for the Node +func WithBlockTracker(blockTracker process.BlocksTracker) Option { + return func(n *Node) error { + if blockTracker == nil { + return ErrNilBlockTracker + } + n.blockTracker = blockTracker + return nil + } +} + // WithGenesisTime sets up the genesis time option for the Node func WithGenesisTime(genesisTime time.Time) Option { return func(n *Node) error { @@ -358,3 +369,11 @@ func WithConsensusType(consensusType string) Option { return nil } } + +// WithTxStorageSize sets up a txStorageSize option for the Node +func WithTxStorageSize(txStorageSize uint32) Option { + return func(n *Node) error { + n.txStorageSize = txStorageSize + return nil + } +} diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index 13a4d0bfdfc..3af33171088 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -388,7 +388,7 @@ func TestWithSyncer_NilSyncerShouldErr(t *testing.T) { opt := WithSyncer(nil) err := opt(node) - assert.Nil(t, node.syncer) + assert.Nil(t, node.syncTimer) assert.Equal(t, ErrNilSyncTimer, err) } @@ -402,7 +402,7 @@ func TestWithSyncer_ShouldWork(t *testing.T) { opt := WithSyncer(sync) err := opt(node) - assert.True(t, node.syncer == sync) + assert.True(t, node.syncTimer == sync) assert.Nil(t, err) } @@ -433,7 +433,7 @@ func TestWithBlockProcessor_NilProcessorShouldErr(t *testing.T) { opt := WithBlockProcessor(nil) err := opt(node) - assert.Nil(t, node.syncer) + assert.Nil(t, node.syncTimer) assert.Equal(t, ErrNilBlockProcessor, err) } diff --git a/node/errors.go b/node/errors.go index ae631a6bc12..6a6c3700c30 100644 --- a/node/errors.go +++ b/node/errors.go @@ -49,6 +49,9 @@ var ErrNilRounder = errors.New("trying to set nil rounder") // ErrNilBlockProcessor signals that a nil block processor has been provided var ErrNilBlockProcessor = errors.New("trying to set nil block processor") +// ErrNilBlockTracker signals that a nil block tracker has been provided +var ErrNilBlockTracker = errors.New("trying to set nil block tracker") + // ErrNilDataPool signals that a nil data pool has 
been provided var ErrNilDataPool = errors.New("trying to set nil data pool") @@ -108,3 +111,9 @@ var ErrWrongValues = errors.New("wrong values for heartbeat parameters") // ErrGenesisBlockNotInitialized signals that genesis block is not initialized var ErrGenesisBlockNotInitialized = errors.New("genesis block is not initialized") + +// ErrNilTransactionPool signals that a nil transaction pool was used +var ErrNilTransactionPool = errors.New("nil transaction pool") + +// ErrTooManyTransactionsInPool signals that there are too many transactions in the pool +var ErrTooManyTransactionsInPool = errors.New("too many transactions in pool") diff --git a/node/mock/shardCoordinatorMock.go b/node/mock/shardCoordinatorMock.go index 4d5f3b7e8ef..4685e90ac43 100644 --- a/node/mock/shardCoordinatorMock.go +++ b/node/mock/shardCoordinatorMock.go @@ -14,7 +14,7 @@ func (scm ShardCoordinatorMock) NumberOfShards() uint32 { } func (scm ShardCoordinatorMock) ComputeId(address state.AddressContainer) uint32 { - panic("implement me") + return 0 } func (scm ShardCoordinatorMock) SetSelfShardId(shardId uint32) error { diff --git a/node/node.go b/node/node.go index 540c3eaf975..f18f558202d 100644 --- a/node/node.go +++ b/node/node.go @@ -64,9 +64,10 @@ type Node struct { roundDuration uint64 consensusGroupSize int messenger P2PMessenger - syncer ntp.SyncTimer + syncTimer ntp.SyncTimer rounder consensus.Rounder blockProcessor process.BlockProcessor + blockTracker process.BlocksTracker genesisTime time.Time accounts state.AccountsAdapter addrConverter state.AddressConverter @@ -97,6 +98,7 @@ type Node struct { isRunning bool isMetachainActive bool + txStorageSize uint32 } // ApplyOptions can set up different configurable options of a Node instance @@ -233,6 +235,7 @@ func (n *Node) StartConsensus() error { worker, err := spos.NewWorker( consensusService, n.blockProcessor, + n.blockTracker, bootstrapper, consensusState, n.forkDetector, @@ -242,7 +245,9 @@ func (n *Node) StartConsensus() error { n.rounder, n.shardCoordinator, n.singleSigner, + n.syncTimer, n.getBroadcastBlock(), + n.getBroadcastHeader(), n.sendMessage, ) if err != nil { @@ -271,7 +276,7 @@ func (n *Node) StartConsensus() error { n.multiSigner, n.rounder, n.shardCoordinator, - n.syncer, + n.syncTimer, validatorGroupSelector) if err != nil { return err @@ -383,7 +388,7 @@ func (n *Node) createChronologyHandler(rounder consensus.Rounder) (consensus.Chr chr, err := chronology.NewChronology( n.genesisTime, rounder, - n.syncer) + n.syncTimer) if err != nil { return nil, err @@ -404,6 +409,18 @@ func (n *Node) getBroadcastBlock() func(data.BodyHandler, data.HeaderHandler) er return nil } +func (n *Node) getBroadcastHeader() func(data.HeaderHandler) error { + if n.shardCoordinator.SelfId() < n.shardCoordinator.NumberOfShards() { + return n.BroadcastShardHeader + } + + if n.shardCoordinator.SelfId() == sharding.MetachainShardId { + return n.BroadcastMetaHeader + } + + return nil +} + func (n *Node) createBootstrapper(rounder consensus.Rounder) (process.Bootstrapper, error) { if n.shardCoordinator.SelfId() < n.shardCoordinator.NumberOfShards() { return n.createShardBootstrapper(rounder) @@ -689,22 +706,6 @@ func (n *Node) BroadcastShardBlock(blockBody data.BodyHandler, header data.Heade go n.messenger.Broadcast(factory.MiniBlocksTopic+ n.shardCoordinator.CommunicationIdentifier(n.shardCoordinator.SelfId()), msgBlockBody) - if !n.isMetachainActive { - //TODO - remove this when metachain is fully tested. 
Should remove only "if" branch, - // the "else" branch should not be removed - msgMetablockBuff, err := n.createMetaBlockFromBlockHeader(header, msgHeader) - if err != nil { - return err - } - - go n.messenger.Broadcast(factory.MetachainBlocksTopic, msgMetablockBuff) - } else { - shardHeaderForMetachainTopic := factory.ShardHeadersForMetachainTopic + - n.shardCoordinator.CommunicationIdentifier(sharding.MetachainShardId) - - go n.messenger.Broadcast(shardHeaderForMetachainTopic, msgHeader) - } - for k, v := range msgMapBlockBody { go n.messenger.Broadcast(factory.MiniBlocksTopic+ n.shardCoordinator.CommunicationIdentifier(k), v) @@ -727,6 +728,36 @@ func (n *Node) BroadcastShardBlock(blockBody data.BodyHandler, header data.Heade return nil } +// BroadcastShardHeader will send the shard header on the metachain topics +func (n *Node) BroadcastShardHeader(header data.HeaderHandler) error { + if header == nil { + return ErrNilBlockHeader + } + + msgHeader, err := n.marshalizer.Marshal(header) + if err != nil { + return err + } + + if !n.isMetachainActive { + //TODO - remove this when metachain is fully tested. Should remove only "if" branch, + // the "else" branch should not be removed + msgMetablockBuff, err := n.createMetaBlockFromBlockHeader(header, msgHeader) + if err != nil { + return err + } + + go n.messenger.Broadcast(factory.MetachainBlocksTopic, msgMetablockBuff) + } else { + shardHeaderForMetachainTopic := factory.ShardHeadersForMetachainTopic + + n.shardCoordinator.CommunicationIdentifier(sharding.MetachainShardId) + + go n.messenger.Broadcast(shardHeaderForMetachainTopic, msgHeader) + } + + return nil +} + // createMetaBlockFromBlockHeader func will be deleted when metachain will be fully implemented as its functionality // will be done by metachain nodes //TODO - delete this func when metachain is fully implemented @@ -790,6 +821,11 @@ func (n *Node) BroadcastMetaBlock(blockBody data.BodyHandler, header data.Header return nil } +// BroadcastMetaHeader will send the meta header on the metachain topics +func (n *Node) BroadcastMetaHeader(headerHandler data.HeaderHandler) error { + return nil +} + // StartHeartbeat starts the node's heartbeat processing/signaling module func (n *Node) StartHeartbeat(config config.HeartbeatConfig) error { if !config.Enabled { diff --git a/node/nodeTesting.go b/node/nodeTesting.go index 293961a08fd..8ed432d47bc 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -9,12 +9,17 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/core/partitioning" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) //TODO convert this const into a var and read it from config when this code moves to another binary const maxBulkTransactionSize = 2 << 17 //128KB bulks +// maxLoadThresholdPercent specifies the maximum load percent accepted from the txs storage size when generating new txs +const maxLoadThresholdPercent = 70 + //TODO move this funcs in a new benchmarking/stress-test binary // GenerateAndSendBulkTransactions is a method for generating and propagating a set @@ -25,6 +30,34 @@ func (n *Node) GenerateAndSendBulkTransactions(receiverHex string, value *big.In return err } + //TODO: Remove this approach later, when throttle is done + if n.shardCoordinator.SelfId() != sharding.MetachainShardId { + txPool := n.dataPool.Transactions() + if txPool 
== nil { + return ErrNilTransactionPool + } + + maxNoOfTx := uint64(0) + txStorageSize := uint64(n.txStorageSize) * maxLoadThresholdPercent / 100 + for i := uint32(0); i < n.shardCoordinator.NumberOfShards(); i++ { + strCache := process.ShardCacherIdentifier(n.shardCoordinator.SelfId(), i) + txStore := txPool.ShardDataStore(strCache) + if txStore == nil { + continue + } + + if uint64(txStore.Len())+noOfTx > txStorageSize { + maxNoOfTx = txStorageSize - uint64(txStore.Len()) + if noOfTx > maxNoOfTx { + noOfTx = maxNoOfTx + if noOfTx <= 0 { + return ErrTooManyTransactionsInPool + } + } + } + } + } + newNonce, senderAddressBytes, recvAddressBytes, senderShardId, err := n.generateBulkTransactionsPrepareParams(receiverHex) if err != nil { return err diff --git a/node/nodeTesting_test.go b/node/nodeTesting_test.go index a5a9256b232..edd0cd2c145 100644 --- a/node/nodeTesting_test.go +++ b/node/nodeTesting_test.go @@ -9,9 +9,11 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/dataRetriever" "github.com/ElrondNetwork/elrond-go-sandbox/node" "github.com/ElrondNetwork/elrond-go-sandbox/node/mock" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" "github.com/stretchr/testify/assert" ) @@ -112,12 +114,90 @@ func TestGenerateAndSendBulkTransactions_NilAddressConverterShouldErr(t *testing assert.Equal(t, node.ErrNilAddressConverter, err) } +func TestGenerateAndSendBulkTransactions_NilTransactionPoolShouldErr(t *testing.T) { + marshalizer := &mock.MarshalizerFake{} + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") + keyGen := &mock.KeyGenMock{} + sk, pk := keyGen.GeneratePair() + singleSigner := &mock.SinglesignMock{} + dataPool := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return nil + }, + } + n, _ := node.NewNode( + node.WithMarshalizer(marshalizer), + node.WithAddressConverter(addrConverter), + node.WithHasher(mock.HasherMock{}), + node.WithAccountsAdapter(accAdapter), + node.WithTxSignPrivKey(sk), + node.WithTxSignPubKey(pk), + node.WithTxSingleSigner(singleSigner), + node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataPool(dataPool), + ) + + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(0), 1) + assert.Equal(t, node.ErrNilTransactionPool, err) +} + +func TestGenerateAndSendBulkTransactions_TooManyTransactionsInPoolShouldErr(t *testing.T) { + marshalizer := &mock.MarshalizerFake{} + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") + keyGen := &mock.KeyGenMock{} + sk, pk := keyGen.GeneratePair() + singleSigner := &mock.SinglesignMock{} + dataPool := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + return &mock.CacherStub{ + LenCalled: func() int { + return 70000 + }, + } + }, + } + }, + } + n, _ := node.NewNode( + node.WithMarshalizer(marshalizer), + node.WithAddressConverter(addrConverter), + node.WithHasher(mock.HasherMock{}), + node.WithAccountsAdapter(accAdapter), + node.WithTxSignPrivKey(sk), + node.WithTxSignPubKey(pk), + node.WithTxSingleSigner(singleSigner), + node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + 
node.WithDataPool(dataPool), + node.WithTxStorageSize(100000), + node.WithMessenger(&mock.MessengerStub{ + BroadcastOnChannelCalled: func(channel string, topic string, buff []byte) { + }, + }), + ) + + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(0), 1) + assert.Equal(t, node.ErrTooManyTransactionsInPool, err) +} + func TestGenerateAndSendBulkTransactions_NilPrivateKeyShouldErr(t *testing.T) { accAdapter := getAccAdapter(big.NewInt(0)) addrConverter := mock.NewAddressConverterFake(32, "0x") keyGen := &mock.KeyGenMock{} _, pk := keyGen.GeneratePair() singleSigner := &mock.SinglesignMock{} + dataPool := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + return nil + }, + } + }, + } n, _ := node.NewNode( node.WithAccountsAdapter(accAdapter), node.WithAddressConverter(addrConverter), @@ -125,6 +205,7 @@ func TestGenerateAndSendBulkTransactions_NilPrivateKeyShouldErr(t *testing.T) { node.WithMarshalizer(&mock.MarshalizerFake{}), node.WithTxSingleSigner(singleSigner), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataPool(dataPool), ) err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(0), 1) @@ -156,6 +237,15 @@ func TestGenerateAndSendBulkTransactions_InvalidReceiverAddressShouldErr(t *test keyGen := &mock.KeyGenMock{} sk, pk := keyGen.GeneratePair() singleSigner := &mock.SinglesignMock{} + dataPool := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + return nil + }, + } + }, + } n, _ := node.NewNode( node.WithAccountsAdapter(accAdapter), node.WithAddressConverter(addrConverter), @@ -163,6 +253,7 @@ func TestGenerateAndSendBulkTransactions_InvalidReceiverAddressShouldErr(t *test node.WithTxSignPubKey(pk), node.WithTxSingleSigner(singleSigner), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataPool(dataPool), ) err := n.GenerateAndSendBulkTransactions("", big.NewInt(0), 1) @@ -179,6 +270,15 @@ func TestGenerateAndSendBulkTransactions_CreateAddressFromPublicKeyBytesErrorsSh keyGen := &mock.KeyGenMock{} sk, pk := keyGen.GeneratePair() singleSigner := &mock.SinglesignMock{} + dataPool := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + return nil + }, + } + }, + } n, _ := node.NewNode( node.WithAccountsAdapter(accAdapter), node.WithAddressConverter(addrConverter), @@ -186,6 +286,7 @@ func TestGenerateAndSendBulkTransactions_CreateAddressFromPublicKeyBytesErrorsSh node.WithTxSignPubKey(pk), node.WithTxSingleSigner(singleSigner), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataPool(dataPool), ) err := n.GenerateAndSendBulkTransactions("", big.NewInt(0), 1) @@ -201,6 +302,15 @@ func TestGenerateAndSendBulkTransactions_MarshalizerErrorsShouldErr(t *testing.T keyGen := &mock.KeyGenMock{} sk, pk := keyGen.GeneratePair() singleSigner := &mock.SinglesignMock{} + dataPool := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + return nil + }, + } + }, + } n, _ := node.NewNode( 
node.WithAccountsAdapter(accAdapter), node.WithAddressConverter(addrConverter), @@ -209,6 +319,7 @@ func TestGenerateAndSendBulkTransactions_MarshalizerErrorsShouldErr(t *testing.T node.WithMarshalizer(marshalizer), node.WithTxSingleSigner(singleSigner), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataPool(dataPool), ) err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(1), 1) @@ -252,6 +363,15 @@ func TestGenerateAndSendBulkTransactions_ShouldWork(t *testing.T) { }, } + dataPool := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + return nil + }, + } + }, + } accAdapter := getAccAdapter(big.NewInt(0)) addrConverter := mock.NewAddressConverterFake(32, "0x") keyGen := &mock.KeyGenMock{} @@ -266,6 +386,7 @@ func TestGenerateAndSendBulkTransactions_ShouldWork(t *testing.T) { node.WithTxSingleSigner(signer), node.WithShardCoordinator(shardCoordinator), node.WithMessenger(mes), + node.WithDataPool(dataPool), ) err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(1), uint64(noOfTx)) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 50c68ef15ca..cc7278a12b2 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -2,10 +2,9 @@ package block import ( "bytes" - "encoding/base64" - "encoding/hex" "fmt" + "github.com/ElrondNetwork/elrond-go-sandbox/core" "github.com/ElrondNetwork/elrond-go-sandbox/core/logger" "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" @@ -74,7 +73,7 @@ func (bp *baseProcessor) checkBlockValidity( } log.Info(fmt.Sprintf("hash not match: local block hash is empty and node received block with previous hash %s\n", - toB64(headerHandler.GetPrevHash()))) + core.ToB64(headerHandler.GetPrevHash()))) return process.ErrInvalidBlockHash } @@ -99,7 +98,7 @@ func (bp *baseProcessor) checkBlockValidity( if !bytes.Equal(headerHandler.GetPrevHash(), prevHeaderHash) { log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", - toB64(prevHeaderHash), toB64(headerHandler.GetPrevHash()))) + core.ToB64(prevHeaderHash), core.ToB64(headerHandler.GetPrevHash()))) return process.ErrInvalidBlockHash } @@ -156,44 +155,30 @@ func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { lines = append(lines, display.NewLineData(false, []string{ "", "Prev hash", - toB64(headerHandler.GetPrevHash())})) + core.ToB64(headerHandler.GetPrevHash())})) lines = append(lines, display.NewLineData(false, []string{ "", "Prev rand seed", - toB64(headerHandler.GetPrevRandSeed())})) + core.ToB64(headerHandler.GetPrevRandSeed())})) lines = append(lines, display.NewLineData(false, []string{ "", "Rand seed", - toB64(headerHandler.GetRandSeed())})) + core.ToB64(headerHandler.GetRandSeed())})) lines = append(lines, display.NewLineData(false, []string{ "", "Pub keys bitmap", - toHex(headerHandler.GetPubKeysBitmap())})) + core.ToHex(headerHandler.GetPubKeysBitmap())})) lines = append(lines, display.NewLineData(false, []string{ "", "Signature", - toB64(headerHandler.GetSignature())})) + core.ToB64(headerHandler.GetSignature())})) lines = append(lines, display.NewLineData(true, []string{ "", "Root hash", - toB64(headerHandler.GetRootHash())})) + core.ToB64(headerHandler.GetRootHash())})) return lines } -func toHex(buff []byte) string { 
- if buff == nil { - return "" - } - return "0x" + hex.EncodeToString(buff) -} - -func toB64(buff []byte) string { - if buff == nil { - return "" - } - return base64.StdEncoding.EncodeToString(buff) -} - // checkProcessorNilParameters will check the imput parameters for nil values func checkProcessorNilParameters( accounts state.AccountsAdapter, @@ -201,6 +186,7 @@ func checkProcessorNilParameters( hasher hashing.Hasher, marshalizer marshal.Marshalizer, store dataRetriever.StorageService, + shardCoordinator sharding.Coordinator, ) error { if accounts == nil { @@ -218,6 +204,9 @@ func checkProcessorNilParameters( if store == nil { return process.ErrNilStorage } + if shardCoordinator == nil { + return process.ErrNilShardCoordinator + } return nil } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index c4d00876958..7e01d177cc1 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -299,6 +299,7 @@ func TestBlockProcessor_CheckBlockValidity(t *testing.T) { &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -370,6 +371,7 @@ func TestVerifyStateRoot_ShouldWork(t *testing.T) { accounts, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -392,6 +394,7 @@ func TestBlockProcessor_computeHeaderHashMarshalizerFail1ShouldErr(t *testing.T) &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -426,6 +429,7 @@ func TestBlockPorcessor_ComputeNewNoncePrevHashShouldWork(t *testing.T) { &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, diff --git a/process/block/export_test.go b/process/block/export_test.go index b9bf7e12748..b50beb5852d 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -108,10 +108,6 @@ func (sp *shardProcessor) ChRcvAllTxs() chan bool { return sp.chRcvAllTxs } -func (mp *metaProcessor) GetShardHeaderFromPool(shardID uint32, headerHash []byte) (data.HeaderHandler, error) { - return mp.getShardHeaderFromPool(shardID, headerHash) -} - func (mp *metaProcessor) RequestBlockHeaders(header *block.MetaBlock) int { return mp.requestBlockHeaders(header) } diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index 301d1a362a0..15be09681ed 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -44,7 +44,7 @@ func (inHdr *InterceptedHeader) Shard() uint32 { return inHdr.ShardId } -// GetHeader returns the Header pointer that holds the data +// GetShardHeader returns the Header pointer that holds the data func (inHdr *InterceptedHeader) GetHeader() *block.Header { return inHdr.Header } diff --git a/process/block/metablock.go b/process/block/metablock.go index 2a5a32d04e5..7c940dfc94b 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/ElrondNetwork/elrond-go-sandbox/core" "github.com/ElrondNetwork/elrond-go-sandbox/data" 
"github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" @@ -65,7 +66,8 @@ func NewMetaProcessor( forkDetector, hasher, marshalizer, - store) + store, + shardCoordinator) if err != nil { return nil, err } @@ -79,9 +81,6 @@ func NewMetaProcessor( if requestHeaderHandler == nil { return nil, process.ErrNilRequestHeaderHandler } - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator - } base := &baseProcessor{ accounts: accounts, @@ -302,12 +301,12 @@ func (mp *metaProcessor) processBlockHeaders(header *block.MetaBlock, round int3 return err } - msg = fmt.Sprintf("%s\n%s", msg, toB64(shardMiniBlockHeader.Hash)) + msg = fmt.Sprintf("%s\n%s", msg, core.ToB64(shardMiniBlockHeader.Hash)) } } if len(msg) > 0 { - log.Info(fmt.Sprintf("The following miniblocks hashes were successfully processed:%s\n", msg)) + log.Info(fmt.Sprintf("the following miniblocks hashes were successfully processed:%s\n", msg)) } return nil @@ -391,7 +390,7 @@ func (mp *metaProcessor) CommitBlock( for i := 0; i < len(header.ShardInfo); i++ { shardData := header.ShardInfo[i] - header, err := mp.getShardHeaderFromPool(shardData.ShardId, shardData.HeaderHash) + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) if header == nil { return err } @@ -469,7 +468,7 @@ func (mp *metaProcessor) createLastNotarizedHdrs(header *block.MetaBlock) error for i := 0; i < len(header.ShardInfo); i++ { shardData := header.ShardInfo[i] - header, err := mp.getShardHeaderFromPool(shardData.ShardId, shardData.HeaderHash) + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) if header == nil { return err } @@ -488,7 +487,7 @@ func (mp *metaProcessor) getSortedShardHdrsFromMetablock(header *block.MetaBlock for i := 0; i < len(header.ShardInfo); i++ { shardData := header.ShardInfo[i] - header, err := mp.getShardHeaderFromPool(shardData.ShardId, shardData.HeaderHash) + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) if header == nil { return nil, err } @@ -697,26 +696,6 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr return false, nil } -// getHeaderFromPool gets the header from a given shard id and a given header hash -func (mp *metaProcessor) getShardHeaderFromPool(shardID uint32, headerHash []byte) (*block.Header, error) { - headerPool := mp.dataPool.ShardHeaders() - if headerPool == nil { - return nil, process.ErrNilHeadersDataPool - } - - val, ok := headerPool.Peek(headerHash) - if !ok { - return nil, process.ErrMissingHeader - } - - header, ok := val.(*block.Header) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - return header, nil -} - // receivedHeader is a call back function which is called when a new header // is added in the headers pool func (mp *metaProcessor) receivedHeader(headerHash []byte) { @@ -762,7 +741,7 @@ func (mp *metaProcessor) computeMissingHeaders(header *block.MetaBlock) map[uint for i := 0; i < len(header.ShardInfo); i++ { shardData := header.ShardInfo[i] - header, _ := mp.getShardHeaderFromPool(shardData.ShardId, shardData.HeaderHash) + header, _ := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) if header == nil { missingHeaders[shardData.ShardId] = shardData.HeaderHash } @@ -1000,7 +979,7 @@ func (mp *metaProcessor) displayLogInfo( shardMBHeaderCounterMutex.RLock() tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\nTotal 
shard MB headers "+ "processed until now: %d. Total shard MB headers processed for this block: %d. Total shard headers remained in pool: %d\n", - toB64(headerHash), + core.ToB64(headerHash), shardMBHeadersTotalProcessed, shardMBHeadersCurrentBlockProcessed, mp.getHeadersCountInPool()) @@ -1056,7 +1035,7 @@ func displayShardInfo(lines []*display.LineData, header *block.MetaBlock) []*dis lines = append(lines, display.NewLineData(false, []string{ "", fmt.Sprintf("ShardMiniBlockHeaderHash_%d", j+1), - toB64(shardData.ShardMiniBlockHeaders[j].Hash)})) + core.ToB64(shardData.ShardMiniBlockHeaders[j].Hash)})) } else if j == 1 { lines = append(lines, display.NewLineData(false, []string{ "", diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 70e1d4ef4de..0a63c72adaf 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -836,27 +836,6 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { time.Sleep(time.Second) } -func TestMetaProcessor_GetHeaderFromPool(t *testing.T) { - t.Parallel() - - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - func(shardID uint32, hdrHash []byte) {}, - ) - hdrHash := []byte("hdr_hash1") - hdr, err := mp.GetShardHeaderFromPool(0, hdrHash) - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(1), hdr.GetNonce()) -} - func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { t.Parallel() diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 1cbdb21aefc..0518299547c 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/ElrondNetwork/elrond-go-sandbox/core" "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" @@ -30,6 +31,7 @@ type shardProcessor struct { *baseProcessor dataPool dataRetriever.PoolsHolder txProcessor process.TransactionProcessor + blocksTracker process.BlocksTracker chRcvAllTxs chan bool onRequestTransaction func(shardID uint32, txHashes [][]byte) mutRequestedTxHashes sync.RWMutex @@ -49,6 +51,7 @@ func NewShardProcessor( accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator, forkDetector process.ForkDetector, + blocksTracker process.BlocksTracker, requestTransactionHandler func(shardId uint32, txHashes [][]byte), requestMiniBlockHandler func(shardId uint32, miniblockHash []byte), ) (*shardProcessor, error) { @@ -58,7 +61,8 @@ func NewShardProcessor( forkDetector, hasher, marshalizer, - store) + store, + shardCoordinator) if err != nil { return nil, err } @@ -69,8 +73,8 @@ func NewShardProcessor( if txProcessor == nil { return nil, process.ErrNilTxProcessor } - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator + if blocksTracker == nil { + return nil, process.ErrNilBlocksTracker } if requestTransactionHandler == nil { return nil, process.ErrNilTransactionHandler @@ -92,6 +96,7 @@ func NewShardProcessor( baseProcessor: base, dataPool: dataPool, txProcessor: txProcessor, + blocksTracker: blocksTracker, } sp.chRcvAllTxs = make(chan bool) @@ -465,6 +470,12 @@ func (sp *shardProcessor) CommitBlock( return err } + sp.blocksTracker.AddBlock(header) + + log.Info(fmt.Sprintf("shardBlock with nonce %d and hash %s has been committed 
successfully\n", + header.Nonce, + core.ToB64(headerHash))) + errNotCritical := sp.removeTxBlockFromPools(body) if errNotCritical != nil { log.Info(errNotCritical.Error()) @@ -549,10 +560,12 @@ func (sp *shardProcessor) removeMetaBlockFromPool(body block.Body) error { } } - // TODO: the final block should be given by metachain - blockIsFinal := hdr.GetNonce() <= sp.forkDetector.GetHighestFinalBlockNonce() - if processedAll && blockIsFinal { - // metablock was processed adn finalized + //TODO: A condition should be added here that allows removing metachain blocks from the pool only if they are + //final in the metachain + if processedAll { + sp.blocksTracker.RemoveNotarisedBlocks(hdr) + + // metablock was processed and finalized buff, err := sp.marshalizer.Marshal(hdr) if err != nil { return err @@ -562,7 +575,8 @@ func (sp *shardProcessor) removeMetaBlockFromPool(body block.Body) error { return err } sp.dataPool.MetaBlocks().Remove(metaBlockKey) - log.Info(fmt.Sprintf("metablock with nonce %d was processed completly and removed from pool\n", hdr.GetNonce())) + log.Info(fmt.Sprintf("metablock with nonce %d has been processed completely and removed from pool\n", + hdr.GetNonce())) } } @@ -651,7 +665,7 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { } log.Info(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", - toB64(metaBlockHash), + core.ToB64(metaBlockHash), hdr.GetNonce())) // TODO: validate the metaheader, through metaprocessor and save only headers with nonce higher than current @@ -1175,7 +1189,7 @@ func (sp *shardProcessor) displayLogInfo( tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\n"+ "Total txs processed until now: %d. Total txs processed for this block: %d. Total txs remained in pool: %d\n\n"+ "Total shards: %d. 
Current shard id: %d\n", - toB64(headerHash), + core.ToB64(headerHash), txsTotalProcessed, txsCurrentBlockProcessed, sp.getNrTxsWithDst(header.ShardId), @@ -1243,7 +1257,7 @@ func displayTxBlockBody(lines []*display.LineData, body block.Body) []*display.L lines = append(lines, display.NewLineData(false, []string{ part, fmt.Sprintf("TxHash_%d", j+1), - toB64(miniBlock.TxHashes[j])})) + core.ToB64(miniBlock.TxHashes[j])})) part = "" } else if j == 1 { diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 47a8414a21d..b9427691a20 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -46,6 +46,7 @@ func TestNewBlockProcessor_NilDataPoolShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, mbHash []byte) {}, ) @@ -65,6 +66,7 @@ func TestNewShardProcessor_NilStoreShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, mbHash []byte) {}, ) @@ -84,6 +86,7 @@ func TestNewShardProcessor_NilHasherShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -104,6 +107,7 @@ func TestNewShardProcessor_NilMarshalizerShouldWork(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -124,6 +128,7 @@ func TestNewShardProcessor_NilTxProcessorShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -144,6 +149,7 @@ func TestNewShardProcessor_NilAccountsAdapterShouldErr(t *testing.T) { nil, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -164,6 +170,7 @@ func TestNewShardProcessor_NilShardCoordinatorShouldErr(t *testing.T) { initAccountsMock(), nil, &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -184,6 +191,7 @@ func TestNewShardProcessor_NilForkDetectorShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), nil, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -192,6 +200,27 @@ func TestNewShardProcessor_NilForkDetectorShouldErr(t *testing.T) { assert.Nil(t, sp) } +func TestNewShardProcessor_NilBlocksTrackerShouldErr(t *testing.T) { + t.Parallel() + tdp := initDataPool() + sp, err := blproc.NewShardProcessor( + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + initAccountsMock(), + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + nil, + func(destShardID uint32, txHashes [][]byte) { + }, + func(destShardID uint32, txHash []byte) {}, + ) + assert.Equal(t, process.ErrNilBlocksTracker, err) + assert.Nil(t, sp) +} + func 
TestNewShardProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { t.Parallel() tdp := initDataPool() @@ -204,6 +233,7 @@ func TestNewShardProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, nil, func(destShardID uint32, txHash []byte) {}, ) @@ -226,6 +256,7 @@ func TestNewShardProcessor_NilTransactionPoolShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -246,6 +277,7 @@ func TestNewShardProcessor_OkValsShouldWork(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -268,6 +300,7 @@ func TestShardProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -289,6 +322,7 @@ func TestShardProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -310,6 +344,7 @@ func TestShardProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -330,6 +365,7 @@ func TestShardProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -367,6 +403,7 @@ func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -423,6 +460,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -447,6 +485,7 @@ func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -477,6 +516,7 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -507,6 +547,7 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing. 
initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -583,6 +624,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -649,6 +691,7 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -680,6 +723,7 @@ func TestShardProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { accounts, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -729,6 +773,7 @@ func TestShardProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing. accounts, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -777,6 +822,7 @@ func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) accounts, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -838,6 +884,7 @@ func TestShardProcessor_CommitBlockStorageFailsForBodyShouldErr(t *testing.T) { return nil }, }, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -883,6 +930,7 @@ func TestShardProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { accounts, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -945,6 +993,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { accounts, mock.NewOneShardCoordinatorMock(), fd, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1032,6 +1081,10 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { accounts, mock.NewOneShardCoordinatorMock(), fd, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + }, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1097,6 +1150,7 @@ func TestShardProcessor_GetTransactionFromPool(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1119,6 +1173,7 @@ func TestShardProcessor_RequestTransactionFromNetwork(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, txHash []byte) {}, ) @@ -1147,6 +1202,7 @@ func TestShardProcessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *testi initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + 
&mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, txHash []byte) {}, ) @@ -1180,6 +1236,7 @@ func TestShardProcessor_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1211,6 +1268,7 @@ func TestShardProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1252,6 +1310,7 @@ func TestShardProcessor_CreateTxBlockBodyOK(t *testing.T) { }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1273,6 +1332,7 @@ func TestShardProcessor_RemoveBlockTxsFromPoolNilBlockShouldErr(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1294,6 +1354,7 @@ func TestShardProcessor_RemoveBlockTxsFromPoolOK(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1328,6 +1389,7 @@ func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1417,6 +1479,7 @@ func TestShardProcessor_DisplayLogInfo(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1600,6 +1663,7 @@ func TestBlockProcessor_CreateBlockHeaderShouldNotReturnNil(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1623,6 +1687,7 @@ func TestShardProcessor_CreateBlockHeaderShouldErrWhenMarshalizerErrors(t *testi initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1662,6 +1727,7 @@ func TestShardProcessor_CreateBlockHeaderReturnsOK(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1709,6 +1775,7 @@ func TestShardProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1750,6 +1817,7 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, 
txHashes [][]byte) {}, func(destShardID uint32, txHash []byte) {}, ) @@ -1783,6 +1851,7 @@ func TestShardProcessor_MarshalizedDataWrongType(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1809,6 +1878,7 @@ func TestShardProcessor_MarshalizedDataNilInput(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1844,6 +1914,7 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -1900,6 +1971,7 @@ func TestShardProcessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, txHash []byte) {}, ) @@ -1943,6 +2015,7 @@ func TestShardProcessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, miniblockHash []byte) {}, ) @@ -2011,6 +2084,7 @@ func TestShardProcessor_ReceivedMiniBlockShouldRequestMissingTransactions(t *tes initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { if containsHash(txHashes, txHash1) { atomic.AddInt32(&txHash1Requested, 1) @@ -2092,6 +2166,7 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, miniblockHash []byte) { if bytes.Equal(miniBlockHash1, miniblockHash) { @@ -2197,6 +2272,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, miniblockHash []byte) {}, ) @@ -2287,6 +2363,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, miniblockHash []byte) {}, ) @@ -2372,6 +2449,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T }, mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, miniblockHash []byte) {}, ) @@ -2466,6 +2544,11 @@ func TestShardProcessor_RemoveMetaBlockFromPoolShouldWork(t *testing.T) { return 0 }, }, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + }, func(destShardID uint32, txHashes [][]byte) {}, func(destShardID uint32, miniblockHash []byte) {}, ) @@ -2500,6 +2583,7 @@ func 
TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockChain(t *testing.T initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -2521,6 +2605,7 @@ func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilTxBlockBody(t *testing. initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -2559,6 +2644,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) { @@ -2612,6 +2698,7 @@ func TestShardProcessor_DecodeBlockBody(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, @@ -2642,6 +2729,7 @@ func TestShardProcessor_DecodeBlockHeader(t *testing.T) { initAccountsMock(), mock.NewOneShardCoordinatorMock(), &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, func(destShardID uint32, txHashes [][]byte) { }, func(destShardID uint32, txHash []byte) {}, diff --git a/process/common.go b/process/common.go new file mode 100644 index 00000000000..61540a233aa --- /dev/null +++ b/process/common.go @@ -0,0 +1,90 @@ +package process + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/dataRetriever" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" +) + +// GetShardHeader gets the header, which is associated with the given hash, from pool or storage +func GetShardHeader( + hash []byte, + cacher storage.Cacher, + marshalizer marshal.Marshalizer, + storageService dataRetriever.StorageService, +) (*block.Header, error) { + if cacher == nil { + return nil, ErrNilCacher + } + if marshalizer == nil { + return nil, ErrNilMarshalizer + } + if storageService == nil { + return nil, ErrNilStorage + } + + hdr, err := GetShardHeaderFromPool(hash, cacher) + if err != nil { + hdr, err = GetShardHeaderFromStorage(hash, marshalizer, storageService) + if err != nil { + return nil, err + } + } + + return hdr, err +} + +// GetShardHeaderFromPool gets the header, which is associated with the given hash, from pool +func GetShardHeaderFromPool( + hash []byte, + cacher storage.Cacher, +) (*block.Header, error) { + if cacher == nil { + return nil, ErrNilCacher + } + + hdr, ok := cacher.Peek(hash) + if !ok { + return nil, ErrMissingHeader + } + + header, ok := hdr.(*block.Header) + if !ok { + return nil, ErrWrongTypeAssertion + } + + return header, nil +} + +// GetShardHeaderFromStorage gets the header, which is associated with the given hash, from storage +func GetShardHeaderFromStorage( + hash []byte, + marshalizer marshal.Marshalizer, + storageService dataRetriever.StorageService, +) (*block.Header, error) { + if marshalizer == nil { + return nil, ErrNilMarshalizer + } + if storageService == nil { + return nil, ErrNilStorage + } + + headerStore := storageService.GetStorer(dataRetriever.BlockHeaderUnit) + if headerStore == nil { + return nil, ErrNilHeadersStorage + } + + buffHeader, err := headerStore.Get(hash) + if err != nil { + 
return nil, ErrMissingHeader + } + + header := &block.Header{} + err = marshalizer.Unmarshal(header, buffHeader) + if err != nil { + return nil, ErrUnmarshalWithoutSuccess + } + + return header, nil +} diff --git a/process/common_test.go b/process/common_test.go new file mode 100644 index 00000000000..1b606e6820b --- /dev/null +++ b/process/common_test.go @@ -0,0 +1,237 @@ +package process_test + +import ( + "bytes" + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/dataRetriever" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestGetShardHeaderShouldErrNilCacher(t *testing.T) { + hash := []byte("X") + + marshalizer := &mock.MarshalizerMock{} + storageService := &mock.ChainStorerMock{} + + header, err := process.GetShardHeader(hash, nil, marshalizer, storageService) + assert.Nil(t, header) + assert.Equal(t, process.ErrNilCacher, err) +} + +func TestGetShardHeaderShouldErrNilMarshalizer(t *testing.T) { + hash := []byte("X") + + cacher := &mock.CacherStub{} + storageService := &mock.ChainStorerMock{} + + header, err := process.GetShardHeader(hash, cacher, nil, storageService) + assert.Nil(t, header) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestGetShardHeaderShouldErrNilStorage(t *testing.T) { + hash := []byte("X") + + cacher := &mock.CacherStub{} + marshalizer := &mock.MarshalizerMock{} + + header, err := process.GetShardHeader(hash, cacher, marshalizer, nil) + assert.Nil(t, header) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestGetShardHeaderShouldGetHeaderFromPool(t *testing.T) { + hash := []byte("X") + + hdr := &block.Header{Nonce: 1} + cacher := &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return hdr, true + }, + } + marshalizer := &mock.MarshalizerMock{} + storageService := &mock.ChainStorerMock{} + + header, _ := process.GetShardHeader(hash, cacher, marshalizer, storageService) + assert.Equal(t, hdr, header) +} + +func TestGetShardHeaderShouldGetHeaderFromStorage(t *testing.T) { + hash := []byte("X") + + hdr := &block.Header{Nonce: 1} + cacher := &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + } + marshalizer := &mock.MarshalizerMock{} + storageService := &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + if bytes.Equal(key, hash) { + return marshalizer.Marshal(hdr) + } + return nil, errors.New("error") + }, + } + }, + } + + header, _ := process.GetShardHeader(hash, cacher, marshalizer, storageService) + assert.Equal(t, hdr, header) +} + +func TestGetShardHeaderFromPoolShouldErrNilCacher(t *testing.T) { + hash := []byte("X") + + header, err := process.GetShardHeaderFromPool(hash, nil) + assert.Nil(t, header) + assert.Equal(t, process.ErrNilCacher, err) +} + +func TestGetShardHeaderFromPoolShouldErrMissingHeader(t *testing.T) { + hash := []byte("X") + + cacher := &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + } + + header, err := process.GetShardHeaderFromPool(hash, cacher) + assert.Nil(t, header) + assert.Equal(t, process.ErrMissingHeader, err) +} + +func TestGetShardHeaderFromPoolShouldErrWrongTypeAssertion(t 
*testing.T) { + hash := []byte("X") + + cacher := &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return &block.MetaBlock{}, true + }, + } + + header, err := process.GetShardHeaderFromPool(hash, cacher) + assert.Nil(t, header) + assert.Equal(t, process.ErrWrongTypeAssertion, err) +} + +func TestGetShardHeaderFromPoolShouldWork(t *testing.T) { + hash := []byte("X") + + hdr := &block.Header{Nonce: 10} + cacher := &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return hdr, true + }, + } + + header, err := process.GetShardHeaderFromPool(hash, cacher) + assert.Nil(t, err) + assert.Equal(t, hdr, header) +} + +func TestGetShardHeaderFromStorageShouldErrNilCacher(t *testing.T) { + hash := []byte("X") + + storageService := &mock.ChainStorerMock{} + + header, err := process.GetShardHeaderFromStorage(hash, nil, storageService) + assert.Nil(t, header) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestGetShardHeaderFromStorageShouldErrNilStorage(t *testing.T) { + hash := []byte("X") + + marshalizer := &mock.MarshalizerMock{} + + header, err := process.GetShardHeaderFromStorage(hash, marshalizer, nil) + assert.Nil(t, header) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestGetShardHeaderFromStorageShouldErrNilHeadersStorage(t *testing.T) { + hash := []byte("X") + + marshalizer := &mock.MarshalizerMock{} + storageService := &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return nil + }, + } + + header, err := process.GetShardHeaderFromStorage(hash, marshalizer, storageService) + assert.Nil(t, header) + assert.Equal(t, process.ErrNilHeadersStorage, err) +} + +func TestGetShardHeaderFromStorageShouldErrMissingHeader(t *testing.T) { + hash := []byte("X") + + marshalizer := &mock.MarshalizerMock{} + storageService := &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, errors.New("error") + }, + } + }, + } + + header, err := process.GetShardHeaderFromStorage(hash, marshalizer, storageService) + assert.Nil(t, header) + assert.Equal(t, process.ErrMissingHeader, err) +} + +func TestGetShardHeaderFromStorageShouldErrUnmarshalWithoutSuccess(t *testing.T) { + hash := []byte("X") + + marshalizer := &mock.MarshalizerMock{} + storageService := &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, nil + }, + } + }, + } + + header, err := process.GetShardHeaderFromStorage(hash, marshalizer, storageService) + assert.Nil(t, header) + assert.Equal(t, process.ErrUnmarshalWithoutSuccess, err) +} + +func TestGetShardHeaderFromStorageShouldWork(t *testing.T) { + hash := []byte("X") + + hdr := &block.Header{} + marshalizer := &mock.MarshalizerMock{} + storageService := &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + if bytes.Equal(key, hash) { + return marshalizer.Marshal(hdr) + } + return nil, errors.New("error") + }, + } + }, + } + + header, err := process.GetShardHeaderFromStorage(hash, marshalizer, storageService) + assert.Nil(t, err) + assert.Equal(t, hdr, header) +} diff --git a/process/errors.go b/process/errors.go index 8b864a891d1..f997e5a0fce 100644 --- a/process/errors.go +++ 
b/process/errors.go @@ -97,6 +97,9 @@ var ErrMissingTransaction = errors.New("missing transaction") // ErrMarshalWithoutSuccess signals that marshal some data was not done with success var ErrMarshalWithoutSuccess = errors.New("marshal without success") +// ErrUnmarshalWithoutSuccess signals that unmarshal some data was not done with success +var ErrUnmarshalWithoutSuccess = errors.New("unmarshal without success") + // ErrRootStateMissmatch signals that persist some data was not done with success var ErrRootStateMissmatch = errors.New("root state does not match") @@ -187,6 +190,9 @@ var ErrNilTxStorage = errors.New("nil transaction storage") // ErrNilStorage signals that a nil storage has been provided var ErrNilStorage = errors.New("nil storage") +// ErrNilBlocksTracker signals that a nil blocks tracker has been provided +var ErrNilBlocksTracker = errors.New("nil blocks tracker") + // ErrInvalidTxInPool signals an invalid transaction in the transactions pool var ErrInvalidTxInPool = errors.New("invalid transaction in the transactions pool") diff --git a/process/interface.go b/process/interface.go index 081bf4e1530..21d4994c527 100644 --- a/process/interface.go +++ b/process/interface.go @@ -133,3 +133,12 @@ type ChronologyValidator interface { type DataPacker interface { PackDataInChunks(data [][]byte, limit int) ([][]byte, error) } + +// BlocksTracker defines the functionality to track all the notarised blocks +type BlocksTracker interface { + UnnotarisedBlocks() []data.HeaderHandler + RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error + AddBlock(headerHandler data.HeaderHandler) + SetBlockBroadcastRound(nonce uint64, round int32) + BlockBroadcastRound(nonce uint64) int32 +} diff --git a/process/mock/blocksTrackerMock.go b/process/mock/blocksTrackerMock.go new file mode 100644 index 00000000000..5c516071da0 --- /dev/null +++ b/process/mock/blocksTrackerMock.go @@ -0,0 +1,33 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data" +) + +type BlocksTrackerMock struct { + UnnotarisedBlocksCalled func() []data.HeaderHandler + RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error + AddBlockCalled func(headerHandler data.HeaderHandler) + SetBlockBroadcastRoundCalled func(nonce uint64, round int32) + BlockBroadcastRoundCalled func(nonce uint64) int32 +} + +func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { + return btm.UnnotarisedBlocksCalled() +} + +func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { + return btm.RemoveNotarisedBlocksCalled(headerHandler) +} + +func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { + btm.AddBlockCalled(headerHandler) +} + +func (btm *BlocksTrackerMock) SetBlockBroadcastRound(nonce uint64, round int32) { + btm.SetBlockBroadcastRoundCalled(nonce, round) +} + +func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int32 { + return btm.BlockBroadcastRoundCalled(nonce) +} diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index c7414b99530..ab59371d595 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -2,12 +2,12 @@ package sync import ( "bytes" - "encoding/base64" "fmt" "sync" "time" "github.com/ElrondNetwork/elrond-go-sandbox/consensus" + "github.com/ElrondNetwork/elrond-go-sandbox/core" "github.com/ElrondNetwork/elrond-go-sandbox/core/logger" "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" @@ -78,17 +78,12 @@ func (boot 
*baseBootstrap) requestedHeaderNonce() *uint64 { return boot.headerNonce } -func (boot *baseBootstrap) processReceivedHeader(header data.HeaderHandler, headerHash []byte) { - if header == nil { - log.Info(ErrNilHeader.Error()) - return - } - +func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) { log.Debug(fmt.Sprintf("receivedHeaders: received header with nonce %d and hash %s from network\n", - header.GetNonce(), - toB64(headerHash))) + headerHandler.GetNonce(), + core.ToB64(headerHash))) - err := boot.forkDetector.AddHeader(header, headerHash, process.BHReceived) + err := boot.forkDetector.AddHeader(headerHandler, headerHash, process.BHReceived) if err != nil { log.Info(err.Error()) } @@ -101,7 +96,7 @@ func (boot *baseBootstrap) receivedHeaderNonce(nonce uint64) { log.Debug(fmt.Sprintf("receivedHeaderNonce: received header with nonce %d and hash %s from network\n", nonce, - toB64(headerHash))) + core.ToB64(headerHash))) n := boot.requestedHeaderNonce() if n == nil { @@ -267,11 +262,3 @@ func isRandomSeedValid(header data.HeaderHandler) bool { return !isPrevRandSeedNilOrEmpty && !isRandSeedNilOrEmpty } - -func toB64(buff []byte) string { - if buff == nil { - return "" - } - - return base64.StdEncoding.EncodeToString(buff) -} diff --git a/process/sync/export_test.go b/process/sync/export_test.go index 5dd27021eb7..727276f0ad2 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -1,6 +1,7 @@ package sync import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/process" ) @@ -9,12 +10,12 @@ func (boot *ShardBootstrap) RequestHeader(nonce uint64) { boot.requestHeader(nonce) } -func (boot *ShardBootstrap) GetHeaderFromPool(nonce uint64) *block.Header { - return boot.getHeaderFromPoolHavingNonce(nonce) +func (boot *ShardBootstrap) GetHeaderFromPoolWithNonce(nonce uint64) *block.Header { + return boot.getHeaderFromPoolWithNonce(nonce) } -func (boot *MetaBootstrap) GetHeaderFromPool(nonce uint64) *block.MetaBlock { - return boot.getHeaderFromPoolHavingNonce(nonce) +func (boot *MetaBootstrap) GetHeaderFromPoolWithNonce(nonce uint64) *block.MetaBlock { + return boot.getHeaderFromPoolWithNonce(nonce) } func (boot *ShardBootstrap) GetMiniBlocks(hashes [][]byte) interface{} { @@ -120,3 +121,7 @@ func (boot *ShardBootstrap) SetForkNonce(nonce uint64) { func (boot *MetaBootstrap) SetForkNonce(nonce uint64) { boot.forkNonce = nonce } + +func (boot *baseBootstrap) ProcessReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) { + boot.processReceivedHeader(headerHandler, headerHash) +} diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 487f65ee61a..c47e346b9a8 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-sandbox/consensus" + "github.com/ElrondNetwork/elrond-go-sandbox/core" "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" @@ -113,51 +114,59 @@ func NewMetaBootstrap( return &boot, nil } -func (boot *MetaBootstrap) getHeader(hash []byte) *block.MetaBlock { - hdr := boot.getHeaderFromPool(hash) - if hdr != nil { - return hdr +func (boot *MetaBootstrap) getHeader(hash []byte) (*block.MetaBlock, error) { + hdr, err := boot.getHeaderFromPool(hash) + if err != nil { + hdr, err = 
boot.getHeaderFromStorage(hash) + if err != nil { + return nil, err + } } - return boot.getHeaderFromStorage(hash) + return hdr, err } -func (boot *MetaBootstrap) getHeaderFromPool(hash []byte) *block.MetaBlock { +func (boot *MetaBootstrap) getHeaderFromPool(hash []byte) (*block.MetaBlock, error) { hdr, ok := boot.headers.Peek(hash) if !ok { - log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) - return nil + return nil, process.ErrMissingHeader } header, ok := hdr.(*block.MetaBlock) if !ok { - log.Debug(fmt.Sprintf("data with hash %v is not metablock\n", hash)) - return nil + return nil, process.ErrWrongTypeAssertion } - return header + return header, nil } -func (boot *MetaBootstrap) getHeaderFromStorage(hash []byte) *block.MetaBlock { +func (boot *MetaBootstrap) getHeaderFromStorage(hash []byte) (*block.MetaBlock, error) { headerStore := boot.store.GetStorer(dataRetriever.MetaBlockUnit) if headerStore == nil { - log.Error(process.ErrNilHeadersStorage.Error()) - return nil + return nil, process.ErrNilHeadersStorage + } + + buffHeader, err := headerStore.Get(hash) + if err != nil { + return nil, process.ErrMissingHeader } - buffHeader, _ := headerStore.Get(hash) header := &block.MetaBlock{} - err := boot.marshalizer.Unmarshal(header, buffHeader) + err = boot.marshalizer.Unmarshal(header, buffHeader) if err != nil { - log.Error(err.Error()) - return nil + return nil, process.ErrUnmarshalWithoutSuccess } - return header + return header, nil } func (boot *MetaBootstrap) receivedHeader(headerHash []byte) { - header := boot.getHeader(headerHash) + header, err := boot.getHeader(headerHash) + if err != nil { + log.Debug(err.Error()) + return + } + boot.processReceivedHeader(header, headerHash) } @@ -229,17 +238,20 @@ func (boot *MetaBootstrap) SyncBlock() error { return err } + timeBefore := time.Now() err = boot.blkExecutor.CommitBlock(boot.blkc, hdr, blockBody) if err != nil { return err } + timeAfter := time.Now() + log.Info(fmt.Sprintf("time elapsed to commit block: %v sec\n", timeAfter.Sub(timeBefore).Seconds())) - log.Info(fmt.Sprintf("block with nonce %d was synced successfully\n", hdr.Nonce)) + log.Info(fmt.Sprintf("block with nonce %d has been synced successfully\n", hdr.Nonce)) return nil } -// getHeaderFromPoolHavingNonce method returns the block header from a given nonce -func (boot *MetaBootstrap) getHeaderFromPoolHavingNonce(nonce uint64) *block.MetaBlock { +// getHeaderFromPoolWithNonce method returns the block header from a given nonce +func (boot *MetaBootstrap) getHeaderFromPoolWithNonce(nonce uint64) *block.MetaBlock { hash, _ := boot.headersNonces.Get(nonce) if hash == nil { log.Debug(fmt.Sprintf("nonce %d not found in headers-nonces cache\n", nonce)) @@ -248,13 +260,13 @@ func (boot *MetaBootstrap) getHeaderFromPoolHavingNonce(nonce uint64) *block.Met hdr, ok := boot.headers.Peek(hash) if !ok { - log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) + log.Debug(fmt.Sprintf("header with hash %s not found in headers cache\n", core.ToB64(hash))) return nil } header, ok := hdr.(*block.MetaBlock) if !ok { - log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) + log.Debug(fmt.Sprintf("data with hash %s is not metablock\n", core.ToB64(hash))) return nil } @@ -276,13 +288,13 @@ func (boot *MetaBootstrap) requestHeader(nonce uint64) { // getHeaderWithNonce method gets the header with given nonce from pool, if it exist there, // and if not it will be requested from network func (boot *MetaBootstrap) 
getHeaderRequestingIfMissing(nonce uint64) (*block.MetaBlock, error) { - hdr := boot.getHeaderFromPoolHavingNonce(nonce) + hdr := boot.getHeaderFromPoolWithNonce(nonce) if hdr == nil { emptyChannel(boot.chRcvHdr) boot.requestHeader(nonce) boot.waitForHeaderNonce() - hdr = boot.getHeaderFromPoolHavingNonce(nonce) + hdr = boot.getHeaderFromPoolWithNonce(nonce) if hdr == nil { return nil, process.ErrMissingHeader } @@ -302,7 +314,7 @@ func (boot *MetaBootstrap) forkChoice() error { } msg := fmt.Sprintf("roll back to header with nonce %d and hash %s", - header.GetNonce()-1, toB64(header.GetPrevHash())) + header.GetNonce()-1, core.ToB64(header.GetPrevHash())) isSigned := isSigned(header) if isSigned { @@ -380,10 +392,13 @@ func (boot *MetaBootstrap) rollback(header *block.MetaBlock) error { func (boot *MetaBootstrap) getPrevHeader(headerStore storage.Storer, header *block.MetaBlock) (*block.MetaBlock, error) { prevHash := header.GetPrevHash() - buffHeader, _ := headerStore.Get(prevHash) - newHeader := &block.MetaBlock{} + buffHeader, err := headerStore.Get(prevHash) + if err != nil { + return nil, err + } - err := boot.marshalizer.Unmarshal(newHeader, buffHeader) + newHeader := &block.MetaBlock{} + err = boot.marshalizer.Unmarshal(newHeader, buffHeader) if err != nil { return nil, err } diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 1f872c401df..a6a5ac4f340 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -1295,7 +1295,7 @@ func TestMetaBootstrap_GetHeaderFromPoolShouldReturnNil(t *testing.T) { account, ) - assert.Nil(t, bs.GetHeaderFromPool(0)) + assert.Nil(t, bs.GetHeaderFromPoolWithNonce(0)) } func TestMetaBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { @@ -1357,7 +1357,7 @@ func TestMetaBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { account, ) - assert.True(t, hdr == bs.GetHeaderFromPool(0)) + assert.True(t, hdr == bs.GetHeaderFromPoolWithNonce(0)) } //------- testing received headers diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 0bdef0f953d..1730889bd96 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-sandbox/consensus" + "github.com/ElrondNetwork/elrond-go-sandbox/core" "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" @@ -129,52 +130,13 @@ func NewShardBootstrap( return &boot, nil } -func (boot *ShardBootstrap) getHeader(hash []byte) *block.Header { - hdr := boot.getHeaderFromPool(hash) - if hdr != nil { - return hdr - } - - return boot.getHeaderFromStorage(hash) -} - -func (boot *ShardBootstrap) getHeaderFromPool(hash []byte) *block.Header { - hdr, ok := boot.headers.Peek(hash) - if !ok { - log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) - return nil - } - - header, ok := hdr.(*block.Header) - if !ok { - log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) - return nil - } - - return header -} - -func (boot *ShardBootstrap) getHeaderFromStorage(hash []byte) *block.Header { - headerStore := boot.store.GetStorer(dataRetriever.BlockHeaderUnit) - - if headerStore == nil { - log.Error(process.ErrNilHeadersStorage.Error()) - return nil - } - - buffHeader, _ := headerStore.Get(hash) - header := &block.Header{} - err := boot.marshalizer.Unmarshal(header, buffHeader) +func (boot *ShardBootstrap) 
receivedHeaders(headerHash []byte) { + header, err := process.GetShardHeader(headerHash, boot.headers, boot.marshalizer, boot.store) if err != nil { - log.Error(err.Error()) - return nil + log.Debug(err.Error()) + return } - return header -} - -func (boot *ShardBootstrap) receivedHeaders(headerHash []byte) { - header := boot.getHeader(headerHash) boot.processReceivedHeader(header, headerHash) } @@ -192,7 +154,7 @@ func (boot *ShardBootstrap) receivedBodyHash(hash []byte) { boot.requestedHashes.SetReceivedHash(hash) if boot.requestedHashes.ReceivedAll() { - log.Info(fmt.Sprintf("received requested txBlockBody with hash %s from network\n", toB64(hash))) + log.Info(fmt.Sprintf("received requested txBlockBody with hash %s from network\n", core.ToB64(hash))) boot.setRequestedMiniBlocks(nil) boot.chRcvMiniBlocks <- true } @@ -288,17 +250,20 @@ func (boot *ShardBootstrap) SyncBlock() error { return err } + timeBefore := time.Now() err = boot.blkExecutor.CommitBlock(boot.blkc, hdr, blockBody) if err != nil { return err } + timeAfter := time.Now() + log.Info(fmt.Sprintf("time elapsed to commit block: %v sec\n", timeAfter.Sub(timeBefore).Seconds())) - log.Info(fmt.Sprintf("block with nonce %d was synced successfully\n", hdr.Nonce)) + log.Info(fmt.Sprintf("block with nonce %d has been synced successfully\n", hdr.Nonce)) return nil } -// getHeaderFromPoolHavingNonce method returns the block header from a given nonce -func (boot *ShardBootstrap) getHeaderFromPoolHavingNonce(nonce uint64) *block.Header { +// getHeaderFromPoolWithNonce method returns the block header from a given nonce +func (boot *ShardBootstrap) getHeaderFromPoolWithNonce(nonce uint64) *block.Header { hash, _ := boot.headersNonces.Get(nonce) if hash == nil { log.Debug(fmt.Sprintf("nonce %d not found in headers-nonces cache\n", nonce)) @@ -307,13 +272,13 @@ func (boot *ShardBootstrap) getHeaderFromPoolHavingNonce(nonce uint64) *block.He hdr, ok := boot.headers.Peek(hash) if !ok { - log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) + log.Debug(fmt.Sprintf("header with hash %s not found in headers cache\n", core.ToB64(hash))) return nil } header, ok := hdr.(*block.Header) if !ok { - log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) + log.Debug(fmt.Sprintf("data with hash %s is not header\n", core.ToB64(hash))) return nil } @@ -335,13 +300,13 @@ func (boot *ShardBootstrap) requestHeader(nonce uint64) { // getHeaderWithNonce method gets the header with given nonce from pool, if it exist there, // and if not it will be requested from network func (boot *ShardBootstrap) getHeaderRequestingIfMissing(nonce uint64) (*block.Header, error) { - hdr := boot.getHeaderFromPoolHavingNonce(nonce) + hdr := boot.getHeaderFromPoolWithNonce(nonce) if hdr == nil { emptyChannel(boot.chRcvHdr) boot.requestHeader(nonce) boot.waitForHeaderNonce() - hdr = boot.getHeaderFromPoolHavingNonce(nonce) + hdr = boot.getHeaderFromPoolWithNonce(nonce) if hdr == nil { return nil, process.ErrMissingHeader } @@ -354,14 +319,14 @@ func (boot *ShardBootstrap) getHeaderRequestingIfMissing(nonce uint64) (*block.H func (boot *ShardBootstrap) requestMiniBlocks(hashes [][]byte) { buff, err := boot.marshalizer.Marshal(hashes) if err != nil { - log.Error("Could not marshal MiniBlock hashes: ", err.Error()) + log.Error("could not marshal MiniBlock hashes: ", err.Error()) return } boot.setRequestedMiniBlocks(hashes) err = boot.miniBlockResolver.RequestDataFromHashArray(hashes) - log.Info(fmt.Sprintf("requested tx body with 
hash %s from network\n", toB64(buff))) + log.Info(fmt.Sprintf("requested tx body with hash %s from network\n", core.ToB64(buff))) if err != nil { log.Error(err.Error()) } @@ -408,7 +373,7 @@ func (boot *ShardBootstrap) forkChoice() error { } msg := fmt.Sprintf("roll back to header with nonce %d and hash %s", - header.GetNonce()-1, toB64(header.GetPrevHash())) + header.GetNonce()-1, core.ToB64(header.GetPrevHash())) isSigned := isSigned(header) if isSigned { @@ -502,10 +467,13 @@ func (boot *ShardBootstrap) rollback(header *block.Header) error { func (boot *ShardBootstrap) getPrevHeader(headerStore storage.Storer, header *block.Header) (*block.Header, error) { prevHash := header.PrevHash - buffHeader, _ := headerStore.Get(prevHash) - newHeader := &block.Header{} + buffHeader, err := headerStore.Get(prevHash) + if err != nil { + return nil, err + } - err := boot.marshalizer.Unmarshal(newHeader, buffHeader) + newHeader := &block.Header{} + err = boot.marshalizer.Unmarshal(newHeader, buffHeader) if err != nil { return nil, err } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index e28549625a8..c90830d2b03 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -1633,7 +1633,7 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnNil(t *testing.T) { account, ) - assert.Nil(t, bs.GetHeaderFromPool(0)) + assert.Nil(t, bs.GetHeaderFromPoolWithNonce(0)) } func TestBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { @@ -1695,7 +1695,7 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { account, ) - assert.True(t, hdr == bs.GetHeaderFromPool(0)) + assert.True(t, hdr == bs.GetHeaderFromPoolWithNonce(0)) } func TestShardGetBlockFromPoolShouldReturnBlock(t *testing.T) { diff --git a/process/track/metaBlock.go b/process/track/metaBlock.go new file mode 100644 index 00000000000..d1959820830 --- /dev/null +++ b/process/track/metaBlock.go @@ -0,0 +1,38 @@ +package track + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data" +) + +// metaBlockTracker implements NotarisedBlocksTracker interface which tracks notarised blocks +type metaBlockTracker struct { +} + +// NewMetaBlockTracker creates a new metaBlockTracker object +func NewMetaBlockTracker() (*metaBlockTracker, error) { + mbt := metaBlockTracker{} + return &mbt, nil +} + +// UnnotarisedBlocks gets all the blocks which are not notarised yet +func (mbt *metaBlockTracker) UnnotarisedBlocks() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) +} + +// RemoveNotarisedBlocks removes all the blocks which already have been notarised +func (mbt *metaBlockTracker) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { + return nil +} + +// AddBlock adds new block to be tracked +func (mbt *metaBlockTracker) AddBlock(headerHandler data.HeaderHandler) { +} + +// SetBlockBroadcastRound sets the round in which the block with the given nonce has been broadcast +func (mbt *metaBlockTracker) SetBlockBroadcastRound(nonce uint64, round int32) { +} + +// BlockBroadcastRound gets the round in which the block with given nonce has been broadcast +func (mbt *metaBlockTracker) BlockBroadcastRound(nonce uint64) int32 { + return 0 +} diff --git a/process/track/metaBlock_test.go b/process/track/metaBlock_test.go new file mode 100644 index 00000000000..cc6c818fc3b --- /dev/null +++ b/process/track/metaBlock_test.go @@ -0,0 +1,41 @@ +package track_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data" + 
"github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/track" + "github.com/stretchr/testify/assert" +) + +func TestMetaBlockTracker_NewMetaBlockTrackerShouldWork(t *testing.T) { + t.Parallel() + + mbt, err := track.NewMetaBlockTracker() + assert.Nil(t, err) + assert.NotNil(t, mbt) +} + +func TestMetaBlockTracker_UnnotarisedBlocksShouldWork(t *testing.T) { + t.Parallel() + + mbt, _ := track.NewMetaBlockTracker() + headers := mbt.UnnotarisedBlocks() + assert.Equal(t, make([]data.HeaderHandler, 0), headers) +} + +func TestMetaBlockTracker_BlockBroadcastRoundShouldWork(t *testing.T) { + t.Parallel() + + mbt, _ := track.NewMetaBlockTracker() + assert.Equal(t, int32(0), mbt.BlockBroadcastRound(1)) +} + +func TestMetaBlockTracker_RemoveNotarisedBlocksShouldWork(t *testing.T) { + t.Parallel() + + mbt, _ := track.NewMetaBlockTracker() + err := mbt.RemoveNotarisedBlocks(&block.MetaBlock{}) + assert.Nil(t, err) +} diff --git a/process/track/shardBlock.go b/process/track/shardBlock.go new file mode 100644 index 00000000000..9ec825ac764 --- /dev/null +++ b/process/track/shardBlock.go @@ -0,0 +1,160 @@ +package track + +import ( + "fmt" + "sync" + + "github.com/ElrondNetwork/elrond-go-sandbox/core" + "github.com/ElrondNetwork/elrond-go-sandbox/core/logger" + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/dataRetriever" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" +) + +var log = logger.DefaultLogger() + +type headerInfo struct { + header data.HeaderHandler + broadcastInRound int32 +} + +// shardBlockTracker implements NotarisedBlocksTracker interface which tracks notarised blocks +type shardBlockTracker struct { + dataPool dataRetriever.PoolsHolder + marshalizer marshal.Marshalizer + shardCoordinator sharding.Coordinator + store dataRetriever.StorageService + + mutUnnotarisedHeaders sync.RWMutex + unnotarisedHeaders map[uint64]*headerInfo +} + +// NewShardBlockTracker creates a new shardBlockTracker object +func NewShardBlockTracker( + dataPool dataRetriever.PoolsHolder, + marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + store dataRetriever.StorageService, +) (*shardBlockTracker, error) { + err := checkTrackerNilParameters( + dataPool, + marshalizer, + shardCoordinator, + store) + if err != nil { + return nil, err + } + + sbt := shardBlockTracker{ + dataPool: dataPool, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + store: store, + } + + sbt.unnotarisedHeaders = make(map[uint64]*headerInfo) + + return &sbt, nil +} + +// checkTrackerNilParameters will check the imput parameters for nil values +func checkTrackerNilParameters( + dataPool dataRetriever.PoolsHolder, + marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + store dataRetriever.StorageService, +) error { + if dataPool == nil { + return process.ErrNilDataPoolHolder + } + if marshalizer == nil { + return process.ErrNilMarshalizer + } + if shardCoordinator == nil { + return process.ErrNilShardCoordinator + } + if store == nil { + return process.ErrNilStorage + } + + return nil +} + +// AddBlock adds new block to be tracked +func (sbt *shardBlockTracker) AddBlock(headerHandler data.HeaderHandler) { + sbt.mutUnnotarisedHeaders.Lock() + sbt.unnotarisedHeaders[headerHandler.GetNonce()] = 
&headerInfo{header: headerHandler, broadcastInRound: 0} + sbt.mutUnnotarisedHeaders.Unlock() +} + +// RemoveNotarisedBlocks removes all the blocks which already have been notarised +func (sbt *shardBlockTracker) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { + metaBlock, ok := headerHandler.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + + for _, shardData := range metaBlock.ShardInfo { + if shardData.ShardId != sbt.shardCoordinator.SelfId() { + continue + } + + header, err := process.GetShardHeader(shardData.HeaderHash, sbt.dataPool.Headers(), sbt.marshalizer, sbt.store) + if err != nil { + continue + } + + log.Info(fmt.Sprintf("shardBlock with nonce %d and hash %s has been notarised by metachain\n", + header.GetNonce(), + core.ToB64(shardData.HeaderHash))) + + sbt.mutUnnotarisedHeaders.Lock() + delete(sbt.unnotarisedHeaders, header.Nonce) + sbt.mutUnnotarisedHeaders.Unlock() + } + + return nil +} + +// UnnotarisedBlocks gets all the blocks which are not notarised yet +func (sbt *shardBlockTracker) UnnotarisedBlocks() []data.HeaderHandler { + sbt.mutUnnotarisedHeaders.RLock() + + hdrs := make([]data.HeaderHandler, 0) + for _, hInfo := range sbt.unnotarisedHeaders { + hdrs = append(hdrs, hInfo.header) + } + + sbt.mutUnnotarisedHeaders.RUnlock() + + return hdrs +} + +// SetBlockBroadcastRound sets the round in which the block with the given nonce has been broadcast +func (sbt *shardBlockTracker) SetBlockBroadcastRound(nonce uint64, round int32) { + sbt.mutUnnotarisedHeaders.Lock() + + hInfo := sbt.unnotarisedHeaders[nonce] + if hInfo != nil { + hInfo.broadcastInRound = round + sbt.unnotarisedHeaders[nonce] = hInfo + } + + sbt.mutUnnotarisedHeaders.Unlock() +} + +// BlockBroadcastRound gets the round in which the block with given nonce has been broadcast +func (sbt *shardBlockTracker) BlockBroadcastRound(nonce uint64) int32 { + sbt.mutUnnotarisedHeaders.RLock() + hInfo := sbt.unnotarisedHeaders[nonce] + sbt.mutUnnotarisedHeaders.RUnlock() + + if hInfo == nil { + return 0 + } + + return hInfo.broadcastInRound +} diff --git a/process/track/shardBlock_test.go b/process/track/shardBlock_test.go new file mode 100644 index 00000000000..e0b939a7bb2 --- /dev/null +++ b/process/track/shardBlock_test.go @@ -0,0 +1,206 @@ +package track_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/process/track" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/stretchr/testify/assert" +) + +func TestNewShardBlockTracker_NilDataPoolShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, err := track.NewShardBlockTracker(nil, marshalizer, shardCoordinator, store) + assert.Nil(t, mbt) + assert.Equal(t, process.ErrNilDataPoolHolder, err) +} + +func TestNewShardBlockTracker_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, err := track.NewShardBlockTracker(pools, nil, shardCoordinator, store) + assert.Nil(t, mbt) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewShardBlockTracker_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + 
marshalizer := &mock.MarshalizerMock{} + store := &mock.ChainStorerMock{} + + mbt, err := track.NewShardBlockTracker(pools, marshalizer, nil, store) + assert.Nil(t, mbt) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewShardBlockTracker_NilStoreShouldErr(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + + mbt, err := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, nil) + assert.Nil(t, mbt) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestNewShardBlockTracker_OkValsShouldWork(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, err := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) + assert.Nil(t, err) + assert.NotNil(t, mbt) +} + +func TestShardBlockTracker_AddBlockShouldWork(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) + hdr1 := &block.Header{Nonce: 2} + mbt.AddBlock(hdr1) + hdr2 := &block.Header{Nonce: 3} + mbt.AddBlock(hdr2) + headers := mbt.UnnotarisedBlocks() + assert.Equal(t, 2, len(headers)) +} + +func TestShardBlockTracker_SetBlockBroadcastRoundShoudNotSetRoundWhenNonceDoesNotExist(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) + hdr := &block.Header{Nonce: 2} + mbt.AddBlock(hdr) + mbt.SetBlockBroadcastRound(1, 10) + assert.Equal(t, int32(0), mbt.BlockBroadcastRound(1)) +} + +func TestShardBlockTracker_SetBlockBroadcastRoundShoudSetRound(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) + hdr := &block.Header{Nonce: 2} + mbt.AddBlock(hdr) + mbt.SetBlockBroadcastRound(2, 10) + assert.Equal(t, int32(10), mbt.BlockBroadcastRound(2)) +} + +func TestShardBlockTracker_RemoveNotarisedBlocksShouldErrWrongTypeAssertion(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) + err := mbt.RemoveNotarisedBlocks(nil) + assert.Equal(t, process.ErrWrongTypeAssertion, err) +} + +func TestShardBlockTracker_RemoveNotarisedBlocksShouldNotRemoveIfShardIdIsNotSelf(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{} + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) + metaBlock := &block.MetaBlock{} + shardInfo := make([]block.ShardData, 0) + sd := block.ShardData{ShardId: 1, HeaderHash: []byte("1")} + shardInfo = append(shardInfo, sd) + 
metaBlock.ShardInfo = shardInfo + header := &block.Header{Nonce: 1} + mbt.AddBlock(header) + _ = mbt.RemoveNotarisedBlocks(metaBlock) + assert.Equal(t, 1, len(mbt.UnnotarisedBlocks())) +} + +func TestShardBlockTracker_RemoveNotarisedBlocksShouldNotRemoveIfGetShardHeaderErr(t *testing.T) { + t.Parallel() + + pools := &mock.PoolsHolderStub{ + HeadersCalled: func() storage.Cacher { + return nil + }, + } + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) + metaBlock := &block.MetaBlock{} + shardInfo := make([]block.ShardData, 0) + sd := block.ShardData{ShardId: 0, HeaderHash: []byte("1")} + shardInfo = append(shardInfo, sd) + metaBlock.ShardInfo = shardInfo + header := &block.Header{Nonce: 1} + mbt.AddBlock(header) + _ = mbt.RemoveNotarisedBlocks(metaBlock) + assert.Equal(t, 1, len(mbt.UnnotarisedBlocks())) +} + +func TestShardBlockTracker_RemoveNotarisedBlocksShouldWork(t *testing.T) { + t.Parallel() + + header := &block.Header{Nonce: 1} + + pools := &mock.PoolsHolderStub{ + HeadersCalled: func() storage.Cacher { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return header, true + }, + } + }, + } + marshalizer := &mock.MarshalizerMock{} + shardCoordinator := mock.NewOneShardCoordinatorMock() + store := &mock.ChainStorerMock{} + + mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) + metaBlock := &block.MetaBlock{} + shardInfo := make([]block.ShardData, 0) + sd := block.ShardData{ShardId: 0, HeaderHash: []byte("1")} + shardInfo = append(shardInfo, sd) + metaBlock.ShardInfo = shardInfo + mbt.AddBlock(header) + _ = mbt.RemoveNotarisedBlocks(metaBlock) + assert.Equal(t, 0, len(mbt.UnnotarisedBlocks())) +}
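
Note (illustrative, not part of the patch above): the new process.GetShardHeader helper looks a header up in the headers pool first and only falls back to the header storer on a cache miss, returning ErrMissingHeader when the hash is in neither place. The sketch below shows how a hypothetical caller might consume it; the fetchShardHeader wrapper and its error handling are assumptions for illustration, while the helper's parameters and error values are taken from process/common.go and its tests.

package example

import (
	"github.com/ElrondNetwork/elrond-go-sandbox/data/block"
	"github.com/ElrondNetwork/elrond-go-sandbox/dataRetriever"
	"github.com/ElrondNetwork/elrond-go-sandbox/marshal"
	"github.com/ElrondNetwork/elrond-go-sandbox/process"
	"github.com/ElrondNetwork/elrond-go-sandbox/storage"
)

// fetchShardHeader resolves a shard header by hash: headers pool first, storage second.
func fetchShardHeader(
	hash []byte,
	headersCacher storage.Cacher,
	marshalizer marshal.Marshalizer,
	store dataRetriever.StorageService,
) (*block.Header, error) {
	header, err := process.GetShardHeader(hash, headersCacher, marshalizer, store)
	if err == process.ErrMissingHeader {
		// the hash is in neither the headers pool nor the headers storer;
		// a real caller would request the header from the network at this point
		return nil, err
	}
	if err != nil {
		// nil cacher/marshalizer/storage, wrong type in the pool, or unmarshal failure
		return nil, err
	}

	return header, nil
}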
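
A second illustrative sketch (also not part of the patch): a plausible call sequence against the new process.BlocksTracker interface, where shard headers are registered after commit, their broadcast round is recorded, and entries are pruned once a notarising metablock arrives. The helper functions, their trigger points and the re-broadcast threshold below are hypothetical; only the interface methods themselves come from process/interface.go.

package example

import (
	"github.com/ElrondNetwork/elrond-go-sandbox/data"
	"github.com/ElrondNetwork/elrond-go-sandbox/process"
)

// trackCommittedBlock registers a freshly committed shard header with the tracker
// and remembers the round in which it was broadcast.
func trackCommittedBlock(tracker process.BlocksTracker, header data.HeaderHandler, round int32) {
	tracker.AddBlock(header)
	tracker.SetBlockBroadcastRound(header.GetNonce(), round)
}

// pruneNotarisedAndListPending removes every header already notarised by the given
// metablock and returns the headers that are still waiting to be notarised.
func pruneNotarisedAndListPending(
	tracker process.BlocksTracker,
	metaHeader data.HeaderHandler,
) ([]data.HeaderHandler, error) {
	if err := tracker.RemoveNotarisedBlocks(metaHeader); err != nil {
		return nil, err
	}

	return tracker.UnnotarisedBlocks(), nil
}

// shouldRebroadcast reports whether the block with the given nonce has waited at least
// `gap` rounds since it was last broadcast (BlockBroadcastRound returns 0 for unknown nonces).
func shouldRebroadcast(tracker process.BlocksTracker, nonce uint64, currentRound int32, gap int32) bool {
	return currentRound-tracker.BlockBroadcastRound(nonce) >= gap
}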